id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4,000 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/pollerlink.py
|
alignak.objects.pollerlink.PollerLinks
|
class PollerLinks(SatelliteLinks):
"""
Class to manage list of PollerLink.
PollerLinks is used to regroup all links between the Arbiter and different Pollers
"""
name_property = "poller_name"
inner_class = PollerLink
|
class PollerLinks(SatelliteLinks):
'''
Class to manage list of PollerLink.
PollerLinks is used to regroup all links between the Arbiter and different Pollers
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.33 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 7 | 0 | 3 | 3 | 2 | 4 | 3 | 3 | 2 | 0 | 3 | 0 | 0 |
4,001 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_statsd.py
|
tests.test_statsd.TestStatsD
|
class TestStatsD(AlignakTest):
"""
This class test the StatsD interface
"""
def setUp(self):
super(TestStatsD, self).setUp()
# Log at DEBUG level
self.set_unit_tests_logger_level('INFO')
self.show_logs()
self.clear_logs()
# Create our own stats manager...
# do not use the global object to restart with a fresh one on each test
self.statsmgr = Stats()
self.fake_statsd = FakeStatsdServer(port=8125)
def tearDown(self):
self.fake_statsd.stop()
self.fake_statsd.join()
def test_statsmgr(self):
""" Stats manager exists
:return:
"""
assert 'statsmgr' in globals()
def test_statsmgr_register_disabled(self):
""" Stats manager is registered as disabled
:return:
"""
# Register stats manager as disabled
assert not self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=False)
assert self.statsmgr.statsd_enabled is False
assert self.statsmgr.broks_enabled is False
assert self.statsmgr.statsd_sock is None
def test_statsmgr_register_disabled_broks(self):
""" Stats manager is registered as disabled, but broks are enabled
:return:
"""
# Register stats manager as disabled
assert not self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=False,
broks_enabled=True)
assert self.statsmgr.statsd_enabled is False
assert self.statsmgr.broks_enabled is True
assert self.statsmgr.statsd_sock is None
assert self.statsmgr.statsd_addr is None
def test_statsmgr_register_enabled(self):
""" Stats manager is registered as enabled
:return:
"""
# Register stats manager as enabled
assert self.statsmgr.statsd_sock is None
assert self.statsmgr.statsd_addr is None
assert self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=True)
assert self.statsmgr.statsd_enabled is True
assert self.statsmgr.broks_enabled is False
assert self.statsmgr.statsd_sock is not None
assert self.statsmgr.statsd_addr is not None
index = 0
self.assert_log_match(re.escape(
'Sending arbiter-master statistics to: localhost:8125, prefix: alignak'
), index)
index += 1
self.assert_log_match(re.escape(
'Trying to contact StatsD server...'
), index)
index += 1
self.assert_log_match(re.escape(
'StatsD server contacted'
), index)
index += 1
def test_statsmgr_register_enabled_broks(self):
""" Stats manager is registered as enabled and broks are enabled
:return:
"""
# Register stats manager as enabled
assert self.statsmgr.statsd_sock is None
assert self.statsmgr.statsd_addr is None
assert self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=True,
broks_enabled=True)
assert self.statsmgr.statsd_enabled is True
assert self.statsmgr.broks_enabled is True
assert self.statsmgr.statsd_sock is not None
assert self.statsmgr.statsd_addr is not None
index = 0
self.assert_log_match(re.escape(
'Sending arbiter-master statistics to: localhost:8125, prefix: alignak'
), index)
index += 1
self.assert_log_match(re.escape(
'Trying to contact StatsD server...'
), index)
index += 1
self.assert_log_match(re.escape(
'StatsD server contacted'
), index)
index += 1
def test_statsmgr_connect(self):
""" Test connection in disabled mode
:return:
"""
# Register stats manager as disabled
assert not self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=False)
# Connect to StatsD server
assert self.statsmgr.statsd_sock is None
assert self.statsmgr.statsd_addr is None
# This method is not usually called directly, but it must refuse the connection
# if it not enabled
assert not self.statsmgr.load_statsd()
assert self.statsmgr.statsd_sock is None
assert self.statsmgr.statsd_addr is None
def test_statsmgr_connect_port_error(self):
""" Test connection with a bad port
:return:
"""
# Register stats manager as enabled (another port than the default one)
assert self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8888,
statsd_prefix='alignak', statsd_enabled=True)
index = 0
self.assert_log_match(re.escape(
'Sending arbiter-master statistics to: localhost:8888, prefix: alignak'
), index)
index += 1
self.assert_log_match(re.escape(
'Trying to contact StatsD server...'
), index)
index += 1
self.assert_log_match(re.escape(
'StatsD server contacted'
), index)
index += 1
self.assert_log_match(re.escape(
'Alignak internal statistics are sent to StatsD.'
), index)
index += 1
# "Connected" to StatsD server - even with a bad port number!
self.assert_no_log_match('Cannot create StatsD socket')
def test_statsmgr_timer(self):
""" Test sending data for a timer
:return:
"""
# Register stats manager as enabled
self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=True,
broks_enabled=True)
index = 0
# # Only for Python > 2.7, DEBUG logs ...
# if os.sys.version_info > (2, 7):
# index = 1
self.show_logs()
self.assert_log_match(re.escape(
'Sending arbiter-master statistics to: localhost:8125, prefix: alignak'
), index)
index += 1
self.assert_log_match(re.escape(
'Trying to contact StatsD server...'
), index)
index += 1
self.assert_log_match(re.escape(
'StatsD server contacted'
), index)
index += 1
self.assert_log_match(re.escape(
'Alignak internal statistics are sent to StatsD.'
), index)
index += 1
assert self.statsmgr.stats == {}
# Create a metric statistic
brok = self.statsmgr.timer('test', 0)
assert len(self.statsmgr.stats) == 1
# Get min, max, count and sum
assert self.statsmgr.stats['test'] == (0, 0, 1, 0)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:0|ms'
# ), 3)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'timer',
'metric': 'alignak.arbiter-master.test',
'value': 0, 'uom': 'ms'
}}
# Increment
brok = self.statsmgr.timer('test', 1)
assert len(self.statsmgr.stats) == 1
# Get min, max, count (incremented) and sum
assert self.statsmgr.stats['test'] == (0, 1, 2, 1)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:1000|ms'
# ), 4)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'timer',
'metric': 'alignak.arbiter-master.test',
'value': 1000, 'uom': 'ms'
}}
# Increment - the function is called 'incr' but it does not increment, it sets the value!
brok = self.statsmgr.timer('test', 12)
assert len(self.statsmgr.stats) == 1
# Get min, max, count (incremented) and sum (increased)
assert self.statsmgr.stats['test'] == (0, 12, 3, 13)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:1000|ms'
# ), 5)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'timer',
'metric': 'alignak.arbiter-master.test',
'value': 12000, 'uom': 'ms'
}}
def test_statsmgr_counter(self):
""" Test sending data for a counter
:return:
"""
# Register stats manager as enabled
self.statsmgr.register('broker-master', 'broker',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=True,
broks_enabled=True)
index = 0
# # Only for Python > 2.7, DEBUG logs ...
# if os.sys.version_info > (2, 7):
# index = 1
self.show_logs()
self.assert_log_match(re.escape(
'Sending broker-master statistics to: localhost:8125, prefix: alignak'
), index)
index += 1
self.assert_log_match(re.escape(
'Trying to contact StatsD server...'
), index)
index += 1
self.assert_log_match(re.escape(
'StatsD server contacted'
), index)
index += 1
self.assert_log_match(re.escape(
'Alignak internal statistics are sent to StatsD.'
), index)
index += 1
assert self.statsmgr.stats == {}
# Create a metric statistic
brok = self.statsmgr.counter('test', 0)
assert len(self.statsmgr.stats) == 1
# Get min, max, count and sum
assert self.statsmgr.stats['test'] == (0, 0, 1, 0)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:0|ms'
# ), 3)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'counter',
'metric': 'alignak.broker-master.test',
'value': 0, 'uom': 'c'
}}
# Increment
brok = self.statsmgr.counter('test', 1)
assert len(self.statsmgr.stats) == 1
# Get min, max, count (incremented) and sum
assert self.statsmgr.stats['test'] == (0, 1, 2, 1)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:1000|ms'
# ), 4)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'counter',
'metric': 'alignak.broker-master.test',
'value': 1, 'uom': 'c'
}}
# Increment - the function is called 'incr' but it does not increment, it sets the value!
brok = self.statsmgr.counter('test', 12)
assert len(self.statsmgr.stats) == 1
# Get min, max, count (incremented) and sum (increased)
assert self.statsmgr.stats['test'] == (0, 12, 3, 13)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:1000|ms'
# ), 5)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'counter',
'metric': 'alignak.broker-master.test',
'value': 12, 'uom': 'c'
}}
def test_statsmgr_gauge(self):
""" Test sending data for a gauge
:return:
"""
# Register stats manager as enabled
self.statsmgr.register('arbiter-master', 'arbiter',
statsd_host='localhost', statsd_port=8125,
statsd_prefix='alignak', statsd_enabled=True,
broks_enabled=True)
index = 0
# # Only for Python > 2.7, DEBUG logs ...
# if os.sys.version_info > (2, 7):
# index = 1
self.show_logs()
self.assert_log_match(re.escape(
'Sending arbiter-master statistics to: localhost:8125, prefix: alignak'
), index)
index += 1
self.assert_log_match(re.escape(
'Trying to contact StatsD server...'
), index)
index += 1
self.assert_log_match(re.escape(
'StatsD server contacted'
), index)
index += 1
self.assert_log_match(re.escape(
'Alignak internal statistics are sent to StatsD.'
), index)
index += 1
assert self.statsmgr.stats == {}
# Create a metric statistic
brok = self.statsmgr.gauge('test', 0)
assert len(self.statsmgr.stats) == 1
# Get min, max, count and sum
assert self.statsmgr.stats['test'] == (0, 0, 1, 0)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:0|ms'
# ), 3)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'gauge',
'metric': 'alignak.arbiter-master.test',
'value': 0, 'uom': 'g'
}}
# Increment
brok = self.statsmgr.gauge('test', 1)
assert len(self.statsmgr.stats) == 1
# Get min, max, count (incremented) and sum
assert self.statsmgr.stats['test'] == (0, 1, 2, 1)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:1000|ms'
# ), 4)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'gauge',
'metric': 'alignak.arbiter-master.test',
'value': 1, 'uom': 'g'
}}
# Increment - the function is called 'incr' but it does not increment, it sets the value!
brok = self.statsmgr.gauge('test', 12)
assert len(self.statsmgr.stats) == 1
# Get min, max, count (incremented) and sum (increased)
assert self.statsmgr.stats['test'] == (0, 12, 3, 13)
# self.assert_log_match(re.escape(
# 'Sending data: alignak.arbiter-master.test:1000|ms'
# ), 5)
# Prepare brok and remove specific brok properties (for test purpose only...
brok.prepare()
brok.__dict__.pop('creation_time')
brok.__dict__.pop('instance_id')
brok.__dict__.pop('prepared')
brok.__dict__.pop('uuid')
brok.__dict__['data'].pop('ts')
assert brok.__dict__ == {'type': 'alignak_stat',
'data': {
'type': 'gauge',
'metric': 'alignak.arbiter-master.test',
'value': 12, 'uom': 'g'
}}
|
class TestStatsD(AlignakTest):
'''
This class test the StatsD interface
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_statsmgr(self):
''' Stats manager exists
:return:
'''
pass
def test_statsmgr_register_disabled(self):
''' Stats manager is registered as disabled
:return:
'''
pass
def test_statsmgr_register_disabled_broks(self):
''' Stats manager is registered as disabled, but broks are enabled
:return:
'''
pass
def test_statsmgr_register_enabled(self):
''' Stats manager is registered as enabled
:return:
'''
pass
def test_statsmgr_register_enabled_broks(self):
''' Stats manager is registered as enabled and broks are enabled
:return:
'''
pass
def test_statsmgr_connect(self):
''' Test connection in disabled mode
:return:
'''
pass
def test_statsmgr_connect_port_error(self):
''' Test connection with a bad port
:return:
'''
pass
def test_statsmgr_timer(self):
''' Test sending data for a timer
:return:
'''
pass
def test_statsmgr_counter(self):
''' Test sending data for a counter
:return:
'''
pass
def test_statsmgr_gauge(self):
''' Test sending data for a gauge
:return:
'''
pass
| 13 | 11 | 37 | 2 | 26 | 9 | 1 | 0.36 | 1 | 3 | 2 | 0 | 12 | 2 | 12 | 67 | 455 | 29 | 314 | 24 | 301 | 112 | 202 | 24 | 189 | 1 | 2 | 0 | 12 |
4,002 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_svc_desc_duplicate_foreach.py
|
tests.test_svc_desc_duplicate_foreach.TestServiceDescriptionDuplicateForEach
|
class TestServiceDescriptionDuplicateForEach(AlignakTest):
def setUp(self):
super(TestServiceDescriptionDuplicateForEach, self).setUp()
self.setup_with_file('cfg/cfg_service_description_duplicate_foreach.cfg')
self._sched = self._scheduler
def test_simple_get_key_value_sequence(self):
rsp = list(generate_key_value_sequences("1", "default42"))
expected = [
{'VALUE': 'default42', 'VALUE1': 'default42', 'KEY': '1'},
]
assert expected == rsp
def test_not_simple_get_key_value_sequence(self):
rsp = list(generate_key_value_sequences("1 $(val1)$, 2 $(val2)$ ", "default42"))
expected = [
{'VALUE': 'val1', 'VALUE1': 'val1', 'KEY': '1'},
{'VALUE': 'val2', 'VALUE1': 'val2', 'KEY': '2'},
]
assert expected == rsp
def test_all_duplicate_ok(self):
host = self._arbiter.conf.hosts.find_by_name("my_host")
services_desc = set(self._arbiter.conf.services[s].service_description for s in host.services)
expected = set(['Generated Service %s' % i for i in range(1, 4)])
assert expected == services_desc
def test_complex(self):
rsp = list(generate_key_value_sequences('Unit [1-6] Port [0-46]$(80%!90%)$,Unit [1-6] Port 47$(80%!90%)$', ''))
assert 288 == len(rsp)
def test_syntax_error_bad_empty_value(self):
generator = generate_key_value_sequences('', '')
with pytest.raises(KeyValueSyntaxError) as ctx:
list(generator)
assert ctx.value.args[0] == "At least one key must be present"
def test_syntax_error_bad_empty_value_with_comma(self):
generator = generate_key_value_sequences(',', '')
with pytest.raises(KeyValueSyntaxError) as ctx:
list(generator)
assert ctx.value.args[0] == "At least one key must be present"
def test_syntax_error_bad_value(self):
generator = generate_key_value_sequences("key $(but bad value: no terminating dollar sign)", '')
with pytest.raises(KeyValueSyntaxError) as ctx:
list(generator)
assert ctx.value.args[0] == "\'key $(but bad value: no terminating dollar sign)\' " \
"is an invalid key(-values) pattern"
|
class TestServiceDescriptionDuplicateForEach(AlignakTest):
def setUp(self):
pass
def test_simple_get_key_value_sequence(self):
pass
def test_not_simple_get_key_value_sequence(self):
pass
def test_all_duplicate_ok(self):
pass
def test_complex(self):
pass
def test_syntax_error_bad_empty_value(self):
pass
def test_syntax_error_bad_empty_value_with_comma(self):
pass
def test_syntax_error_bad_value(self):
pass
| 9 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 5 | 1 | 0 | 8 | 1 | 8 | 63 | 51 | 9 | 42 | 24 | 33 | 0 | 36 | 21 | 27 | 1 | 2 | 1 | 8 |
4,003 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_timeout.py
|
tests.test_timeout.TestWorkerTimeout
|
class TestWorkerTimeout(AlignakTest):
def setUp(self):
super(TestWorkerTimeout, self).setUp()
# we have an external process, so we must un-fake time functions
self.setup_with_file('cfg/cfg_check_worker_timeout.cfg',
dispatching=True)
assert self.conf_is_correct
def test_notification_timeout(self):
""" Test timeout for notification sending
:return:
"""
# Get a test service
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0_timeout")
# These queues connect a poller/reactionner with a worker
to_queue = Queue()
from_queue = Queue()
control_queue = Queue()
# This test script plays the role of the reactionner
# Now we "fork" a worker
w = Worker(1, to_queue, from_queue, 1)
w.uuid = 1
w.i_am_dying = False
# We prepare a notification in the to_queue
contact = Contact({})
contact.contact_name = "alignak"
data = {
'uuid': 1,
'type': 'PROBLEM',
'status': 'scheduled',
'command': 'tests/libexec/sleep_command.sh 7',
'command_call': '',
'ref': svc.uuid,
'contact': '',
't_to_go': 0.0
}
n = Notification(data)
n.status = ACT_STATUS_QUEUED
n.t_to_go = time.time()
n.contact = contact
n.timeout = 2
n.env = {}
n.exit_status = 0
n.module_type = "fork"
# Send the job to the worker
msg = Message(_type='Do', data=n)
to_queue.put(msg)
# Now we simulate the Worker's work() routine. We can't call it
# as w.work() because it is an endless loop
w.checks = []
w.returns_queue = from_queue
w.slave_q = to_queue
for i in range(1, 10):
w.get_new_checks(to_queue, from_queue)
# During the first loop the sleeping command is launched
w.launch_new_checks()
w.manage_finished_checks(from_queue)
time.sleep(1)
# The worker should have finished its job now, either correctly or with a timeout
msg = from_queue.get()
o = msg.get_data()
self.assertEqual(ACT_STATUS_TIMEOUT, o.status)
self.assertEqual(3, o.exit_status)
self.assertLess(o.execution_time, n.timeout+1)
# Let us be a good poller and clean up
to_queue.close()
control_queue.close()
# Now look what the scheduler says about this
self._scheduler.actions[n.uuid] = n
# Fake the scheduler inner results queue ... only for the test!
self._scheduler.manage_results(o)
self.show_logs()
self.assert_any_log_match("Contact alignak service notification command "
"'tests/libexec/sleep_command.sh 7 ' timed out after")
|
class TestWorkerTimeout(AlignakTest):
def setUp(self):
pass
def test_notification_timeout(self):
''' Test timeout for notification sending
:return:
'''
pass
| 3 | 1 | 44 | 7 | 28 | 9 | 2 | 0.3 | 1 | 6 | 4 | 0 | 2 | 0 | 2 | 57 | 89 | 15 | 57 | 14 | 54 | 17 | 45 | 14 | 42 | 2 | 2 | 1 | 3 |
4,004 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_timeperiods.py
|
tests.test_timeperiods.TestTimeperiods
|
class TestTimeperiods(AlignakTest):
def setUp(self):
super(TestTimeperiods, self).setUp()
def test_timeperiod_no_daterange(self):
"""
Test with a timeperiod have no daterange
:return: None
"""
now = time.time()
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, '1999-01-28 00:00-24:00')
t_next = timeperiod.get_next_valid_time_from_t(now)
self.assertIsNone(t_next)
def test_simple_timeperiod(self):
"""
Test a timeperiod with one timerange
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_12)
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday 16:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print(t_next)
self.assertEqual("Tue Jul 13 16:30:00 2010", t_next)
def test_simple_with_multiple_time(self):
"""
Test timeperiod with 2 ranges:
* tuesday 00:00-07:00
* tuesday 21:30-24:00
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_12)
# Then a simple same day
print("Cheking validity for", time.asctime(time.localtime(july_the_12)))
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday 00:00-07:00,21:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("RES:", t_next)
self.assertEqual("Tue Jul 13 00:00:00 2010", t_next)
# Now ask about at 00:00 time?
july_the_12 = time.mktime(time.strptime("12 Jul 2010 00:00:00", "%d %b %Y %H:%M:%S"))
# Then a simple same day
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next?", t_next)
self.assertEqual("Tue Jul 13 00:00:00 2010", t_next)
def test_get_invalid_time(self):
"""
Test get next invalid time
:return: None
"""
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, 'monday 00:00-24:00')
first_nov = int(time.mktime(time.strptime("1 Nov 2010 00:00:00", "%d %b %Y %H:%M:%S")))
print(first_nov)
end = timeperiod.get_next_invalid_time_from_t(first_nov)
end = time.asctime(time.localtime(end))
self.assertEqual("Tue Nov 2 00:00:00 2010", end)
first_nov = int(time.mktime(time.strptime("2 Nov 2010 00:00:00", "%d %b %Y %H:%M:%S")))
print(first_nov)
end = timeperiod.get_next_invalid_time_from_t(first_nov)
end = time.asctime(time.localtime(end))
self.assertEqual("Tue Nov 2 00:00:00 2010", end)
def test_get_invalid_time_with_exclude(self):
"""
Test get next invalid time with exclude
:return: None
"""
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, 'monday 00:00-24:00')
t2 = Timeperiod({})
t2.resolve_daterange(t2.dateranges, 'monday 08:30-21:00')
timeperiod.exclude = [t2]
first_nov = int(time.mktime(time.strptime("1 Nov 2010 00:00:00", "%d %b %Y %H:%M:%S")))
print(first_nov)
end = timeperiod.get_next_invalid_time_from_t(first_nov)
end = time.asctime(time.localtime(end))
self.assertEqual("Mon Nov 1 08:30:00 2010", end)
second_nov = int(time.mktime(time.strptime("2 Nov 2010 00:00:00", "%d %b %Y %H:%M:%S")))
print(second_nov)
end = timeperiod.get_next_invalid_time_from_t(second_nov)
end = time.asctime(time.localtime(end))
self.assertEqual("Tue Nov 2 00:00:00 2010", end)
def test_get_valid_time(self):
"""
Test get next valid time
:return: None
"""
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, 'monday 00:00-24:00')
first_nov = int(time.mktime(time.strptime("26 Oct 2010 00:00:00", "%d %b %Y %H:%M:%S")))
print(first_nov)
start = timeperiod.get_next_valid_time_from_t(first_nov)
self.assertIsNotNone(start)
start = time.asctime(time.localtime(start))
self.assertEqual("Mon Nov 1 00:00:00 2010", start)
def test_simple_with_multiple_time_multiple_days(self):
"""
Test timeperiod with multiple daterange on multiple days:
* monday 00:00-07:00
* monday 21:30-24:00
* tuesday 00:00-07:00
* tuesday 21:30-24:00
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_12)
# Then a simple same day
timeperiod = Timeperiod({})
print("Cheking validity for", time.asctime(time.localtime(july_the_12)))
timeperiod.resolve_daterange(timeperiod.dateranges, 'monday 00:00-07:00,21:30-24:00')
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday 00:00-07:00,21:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("RES:", t_next)
self.assertEqual("Mon Jul 12 21:30:00 2010", t_next)
# what about the next invalid?
t_next_inv = timeperiod.get_next_invalid_time_from_t(july_the_12)
t_next_inv = time.asctime(time.localtime(t_next_inv))
print("RES:", t_next_inv)
self.assertEqual("Mon Jul 12 15:00:00 2010", t_next_inv)
# what about a valid time and ask next invalid? Like at 22:00h?
july_the_12 = time.mktime(time.strptime("12 Jul 2010 22:00:00", "%d %b %Y %H:%M:%S"))
t_next_inv = timeperiod.get_next_invalid_time_from_t(july_the_12)
t_next_inv = time.asctime(time.localtime(t_next_inv))
print("RES:", t_next_inv) #, t.is_time_valid(july_the_12)
self.assertEqual("Tue Jul 13 07:00:01 2010", t_next_inv)
# Now ask about at 00:00 time?
july_the_12 = time.mktime(time.strptime("12 Jul 2010 00:00:00", "%d %b %Y %H:%M:%S"))
print("Cheking validity for", time.asctime(time.localtime(july_the_12)))
# Then a simple same day
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next?", t_next)
self.assertEqual("Mon Jul 12 00:00:00 2010", t_next)
def test_get_invalid_when_timeperiod_24x7(self):
"""
Test get the next invalid time when timeperiod 24x7
:return:
"""
now = time.time()
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Now look for the never case
tp_all = Timeperiod({})
tp_all.resolve_daterange(tp_all.dateranges, 'monday 00:00-24:00')
tp_all.resolve_daterange(tp_all.dateranges, 'tuesday 00:00-24:00')
tp_all.resolve_daterange(tp_all.dateranges, 'wednesday 00:00-24:00')
tp_all.resolve_daterange(tp_all.dateranges, 'thursday 00:00-24:00')
tp_all.resolve_daterange(tp_all.dateranges, 'friday 00:00-24:00')
tp_all.resolve_daterange(tp_all.dateranges, 'saturday 00:00-24:00')
tp_all.resolve_daterange(tp_all.dateranges, 'sunday 00:00-24:00')
t_next_inv = tp_all.get_next_invalid_time_from_t(july_the_12)
t_next_inv = time.asctime(time.localtime(t_next_inv))
print("RES:", t_next_inv) #, t.is_time_valid(july_the_12)
self.assertEqual('Tue Jul 19 00:00:00 2011', t_next_inv)
def test_simple_timeperiod_with_exclude(self):
"""
Test simple timeperiod with exclude periods
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print("July 12th, 2010: %s" % july_the_12)
# First a false test, no results
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, '1999-01-28 00:00-24:00')
t_next = timeperiod.get_next_valid_time_from_t(now)
self.assertIs(None, t_next)
# Then a simple same day
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday 16:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print(t_next)
self.assertEqual("Tue Jul 13 16:30:00 2010", t_next)
# Now we add this timeperiod an exception
t2 = Timeperiod({})
t2.timeperiod_name = ''
t2.resolve_daterange(t2.dateranges, 'tuesday 08:30-21:00')
timeperiod.exclude = [t2]
# So the next will be after 16:30 and not before 21:00. So
# It will be 21:00:01 (first second after invalid is valid)
# we clean the cache of previous calc of t ;)
timeperiod.cache = {}
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("T nxt with exclude:", t_next)
self.assertEqual("Tue Jul 13 21:00:01 2010", t_next)
def test_dayweek_timeperiod_with_exclude(self):
"""
test dayweek timeperiod with exclude
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a simple same day
timeperiod = Timeperiod({})
timeperiod.timeperiod_name = 'T1'
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday 2 16:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("T next", t_next)
self.assertEqual("Tue Jul 13 16:30:00 2010", t_next)
# Now we add this timeperiod an exception
t2 = Timeperiod({})
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'tuesday 00:00-23:58')
timeperiod.exclude = [t2]
# We are a bad boy: first time period want a tuesday
# but exclude do not want it until 23:58. So next is 58 + 1 second :)
timeperiod.cache = {}
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Jul 13 23:58:01 2010', t_next)
t_exclude = t2.get_next_valid_time_from_t(july_the_12)
t_exclude = time.asctime(time.localtime(t_exclude))
self.assertEqual('Tue Jul 13 00:00:00 2010', t_exclude)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
t_exclude_inv = time.asctime(time.localtime(t_exclude_inv))
self.assertEqual('Mon Jul 12 15:00:00 2010', t_exclude_inv)
def test_mondayweek_timeperiod_with_exclude(self):
"""
Test monday week timeperiod with exclude
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a simple same day
timeperiod = Timeperiod({})
timeperiod.timeperiod_name = 'T1'
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday 3 16:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
self.assertEqual("Tue Jul 20 16:30:00 2010", t_next)
# Now we add this timeperiod an exception
# And a good one: from april (so before) to august (after), and full time.
# But the 17 is a tuesday, but the 3 of august, so the next 3 tuesday is
# ..... the Tue Sep 21 :) Yes, we should wait quite a lot :)
t2 = Timeperiod({})
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'april 1 - august 23 00:00-24:00')
timeperiod.exclude = [t2]
timeperiod.cache = {}
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Sep 21 16:30:00 2010', t_next)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
t_exclude_inv = time.asctime(time.localtime(t_exclude_inv))
self.assertEqual('Tue Aug 24 00:00:00 2010', t_exclude_inv)
def test_mondayweek_timeperiod_with_exclude_bis(self):
"""
Test monday weeb timeperiod with exclude, version 2 :D
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a funny daterange
print("Testing daterange", 'tuesday -1 - monday 1 16:30-24:00')
timerange = Timeperiod({})
timerange.timeperiod_name = 'T1'
timerange.resolve_daterange(timerange.dateranges, 'tuesday -1 - monday 1 16:30-24:00')
t_next = timerange.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next without exclude", t_next)
self.assertEqual("Tue Jul 27 16:30:00 2010", t_next)
# Now we add this timeperiod an exception
# And a good one: from april (so before) to august (after), and full time.
# But the 27 is now not possible? So what next? Add a month!
# last tuesday of august, the 31 :)
t2 = Timeperiod({})
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'april 1 - august 16 00:00-24:00')
timerange.exclude = [t2]
timerange.cache = {}
t_next = timerange.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Aug 31 16:30:00 2010', t_next)
t_exclude = t2.get_next_valid_time_from_t(july_the_12)
t_exclude = time.asctime(time.localtime(t_exclude))
self.assertEqual('Mon Jul 12 15:00:00 2010', t_exclude)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
t_exclude_inv = time.asctime(time.localtime(t_exclude_inv))
self.assertEqual('Tue Aug 17 00:00:00 2010', t_exclude_inv)
def test_mondayweek_timeperiod_with_exclude_and_multiple_daterange(self):
"""
Test monday week timeperiod with exclude multiple dateranges
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a funny daterange
print("Testing daterange", 'tuesday -1 - monday 1 16:30-24:00')
timeperiod = Timeperiod({})
timeperiod.timeperiod_name = 'T1'
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday -1 - monday 1 16:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next without exclude", t_next)
self.assertEqual("Tue Jul 27 16:30:00 2010", t_next)
# Now we add this timeperiod an exception
# And a good one: from april (so before) to august (after), and full time.
# But the 27 is nw not possible? So what next? Add a month!
# But maybe it's not enough? :)
# The without the 2nd exclude, it's the Tues Aug 31, but it's inside
# saturday -1 - monday 1 because saturday -1 is the 28 august, so no.
# in september saturday -1 is the 25, and tuesday -1 is 28, so still no
# A month again! So now tuesday -1 is 26 and saturday -1 is 30. So ok
# for this one! that was quite long isn't it?
t2 = Timeperiod({})
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'april 1 - august 16 00:00-24:00')
t2.resolve_daterange(t2.dateranges, 'saturday -1 - monday 1 16:00-24:00')
timeperiod.exclude = [t2]
timeperiod.cache = {}
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Oct 26 16:30:00 2010', t_next)
t_exclude_inv = t2.get_next_invalid_time_from_t(july_the_12)
t_exclude_inv = time.asctime(time.localtime(t_exclude_inv))
self.assertEqual('Tue Aug 17 00:00:00 2010', t_exclude_inv)
def test_monweekday_timeperiod_with_exclude(self):
"""
Test mon week day timeperiod with exclude
:return: None
"""
now = time.time()
# Get the 12 of july 2010 at 15:00, monday
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
# Then a funny daterange
print("Testing daterange", 'tuesday -1 july - monday 1 september 16:30-24:00')
timeperiod = Timeperiod({})
timeperiod.timeperiod_name = 'T1'
timeperiod.resolve_daterange(timeperiod.dateranges,
'tuesday -1 july - monday 1 september 16:30-24:00')
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
print("Next without exclude", t_next)
self.assertEqual("Tue Jul 27 16:30:00 2010", t_next)
# Now we add this timeperiod an exception
# and from april (before) to august monday 3 (monday 16 august),
t2 = Timeperiod({})
t2.timeperiod_name = 'T2'
t2.resolve_daterange(t2.dateranges, 'thursday 1 april - monday 3 august 00:00-24:00')
timeperiod.exclude = [t2]
timeperiod.cache = {}
t_next = timeperiod.get_next_valid_time_from_t(july_the_12)
t_next = time.asctime(time.localtime(t_next))
self.assertEqual('Tue Aug 17 16:30:00 2010', t_next)
def test_dayweek_exclusion_timeperiod(self):
"""
Test week day timeperiod with exclusion
:return: None
"""
now = time.time()
# Get the 13 of july 2010 at 15:00, tuesday
july_the_13 = time.mktime(time.strptime("13 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_13)
# Now we add this timeperiod an exception
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, 'monday 00:00-24:00')
timeperiod.resolve_daterange(timeperiod.dateranges, 'tuesday 00:00-24:00')
timeperiod.resolve_daterange(timeperiod.dateranges, 'wednesday 00:00-24:00')
t2 = Timeperiod({})
t2.timeperiod_name = ''
t2.resolve_daterange(t2.dateranges, 'tuesday 00:00-24:00')
timeperiod.exclude = [t2]
t_next = timeperiod.get_next_valid_time_from_t(july_the_13)
t_next = time.asctime(time.localtime(t_next))
print("T next", t_next)
self.assertEqual("Wed Jul 14 00:00:00 2010", t_next)
july_the_12 = time.mktime(time.strptime("12 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
t_inv = timeperiod.get_next_invalid_time_from_t(july_the_12)
t_inv = time.asctime(time.localtime(t_inv))
self.assertEqual('Tue Jul 13 00:00:00 2010', t_inv)
def test_dayweek_exclusion_timeperiod_with_day_range(self):
"""
Test day week timeperiod with exclude day range
:return: None
"""
# Get the 13 of july 2010 at 15:00, tuesday
july_the_13 = time.mktime(time.strptime("13 Jul 2010 15:00:00", "%d %b %Y %H:%M:%S"))
print(july_the_13)
timeperiod = Timeperiod({})
timeperiod.resolve_daterange(timeperiod.dateranges, '2010-03-01 - 2020-03-01 00:00-24:00')
# Now we add this timeperiod an exception
t2 = Timeperiod({})
t2.timeperiod_name = ''
t2.resolve_daterange(t2.dateranges, 'tuesday 00:00-24:00')
timeperiod.exclude = [t2]
t_next = timeperiod.get_next_valid_time_from_t(july_the_13)
t_next = time.asctime(time.localtime(t_next))
print("T next", t_next)
self.assertEqual("Wed Jul 14 00:00:00 2010", t_next)
def test_issue_1385(self):
"""
https://github.com/naparuba/shinken/issues/1385
"""
tp = Timeperiod({})
tp.timeperiod_name = 'mercredi2-22-02'
tp.resolve_daterange(tp.dateranges, 'wednesday 2 00:00-02:00,22:00-24:00')
tp.resolve_daterange(tp.dateranges, 'thursday 2 00:00-02:00,22:00-24:00')
valid_times = (
(2014, 11, 12, 1, 0), # second wednesday of november @ 01:00
(2014, 11, 12, 23, 0), # same @23:00
(2014, 11, 13, 0, 0), # second thursday @ 00:00
# in december:
(2014, 12, 10, 1, 0), # second wednesday @ 01:00
(2014, 12, 10, 23, 0), # second wednesday @ 23:00
(2014, 12, 11, 1, 0), # second thursday @ 01:00
(2014, 12, 11, 23, 0), # second thursday @ 23:00
)
for valid in valid_times:
dt = datetime.datetime(*valid)
valid_tm = time.mktime(dt.timetuple())
self.assertTrue(tp.is_time_valid(valid_tm))
invalid_times = (
(2014, 11, 12, 3, 0), # second wednesday of november @ 03:00
(2014, 11, 3, 1, 0), # first wednesday ..
(2014, 11, 4, 1, 0), # first thursday
(2014, 11, 17, 1, 0), # third monday
(2014, 11, 18, 1, 0), # third tuesday
# in december:
(2014, 12, 5, 3, 0), # first friday
(2014, 12, 17, 1, 0), # third wednesday
(2014, 12, 18, 1, 0), # third thursday
(2014, 12, 24, 1, 0), # fourth wednesday
(2014, 12, 25, 1, 0), # fourth thursday
(2014, 12, 31, 1, 0),
)
for invalid in invalid_times:
dt = datetime.datetime(*invalid)
invalid_tm = time.mktime(dt.timetuple())
self.assertFalse(tp.is_time_valid(invalid_tm))
def test_timeperiod_multiple_monday(self):
"""
Test with multiple monday
:return: None
"""
self.setup_with_file('cfg/cfg_timeperiods.cfg')
tp = self._arbiter.conf.timeperiods.find_by_name("us-holidays")
self.assertEqual(7, len(tp.dateranges))
mydateranges = []
for daterange in tp.dateranges:
mydateranges.append({
'smon': daterange.smon,
'smday': daterange.smday,
'swday': daterange.swday,
'swday_offset': daterange.swday_offset
})
ref = [
{
'smon': 1,
'smday': 1,
'swday': 0,
'swday_offset': 0
},
{
'smon': 5,
'smday': 0,
'swday': 0,
'swday_offset': -1
},
{
'smon': 7,
'smday': 4,
'swday': 0,
'swday_offset': 0
},
{
'smon': 9,
'smday': 0,
'swday': 0,
'swday_offset': 1
},
{
'smon': 11,
'smday': 0,
'swday': 3,
'swday_offset': -1
},
{
'smon': 12,
'smday': 25,
'swday': 0,
'swday_offset': 0
},
{
'smon': 7,
'smday': 14,
'swday': 0,
'swday_offset': 0
},
]
self.assertItemsEqual(ref, mydateranges)
|
class TestTimeperiods(AlignakTest):
def setUp(self):
pass
def test_timeperiod_no_daterange(self):
'''
Test with a timeperiod have no daterange
:return: None
'''
pass
def test_simple_timeperiod(self):
'''
Test a timeperiod with one timerange
:return: None
'''
pass
def test_simple_with_multiple_time(self):
'''
Test timeperiod with 2 ranges:
* tuesday 00:00-07:00
* tuesday 21:30-24:00
:return: None
'''
pass
def test_get_invalid_time(self):
'''
Test get next invalid time
:return: None
'''
pass
def test_get_invalid_time_with_exclude(self):
'''
Test get next invalid time with exclude
:return: None
'''
pass
def test_get_valid_time(self):
'''
Test get next valid time
:return: None
'''
pass
def test_simple_with_multiple_time_multiple_days(self):
'''
Test timeperiod with multiple daterange on multiple days:
* monday 00:00-07:00
* monday 21:30-24:00
* tuesday 00:00-07:00
* tuesday 21:30-24:00
:return: None
'''
pass
def test_get_invalid_when_timeperiod_24x7(self):
'''
Test get the next invalid time when timeperiod 24x7
:return:
'''
pass
def test_simple_timeperiod_with_exclude(self):
'''
Test simple timeperiod with exclude periods
:return: None
'''
pass
def test_dayweek_timeperiod_with_exclude(self):
'''
test dayweek timeperiod with exclude
:return: None
'''
pass
def test_mondayweek_timeperiod_with_exclude(self):
'''
Test monday week timeperiod with exclude
:return: None
'''
pass
def test_mondayweek_timeperiod_with_exclude_bis(self):
'''
Test monday weeb timeperiod with exclude, version 2 :D
:return: None
'''
pass
def test_mondayweek_timeperiod_with_exclude_and_multiple_daterange(self):
'''
Test monday week timeperiod with exclude multiple dateranges
:return: None
'''
pass
def test_monweekday_timeperiod_with_exclude(self):
'''
Test mon week day timeperiod with exclude
:return: None
'''
pass
def test_dayweek_exclusion_timeperiod(self):
'''
Test week day timeperiod with exclusion
:return: None
'''
pass
def test_dayweek_exclusion_timeperiod_with_day_range(self):
'''
Test day week timeperiod with exclude day range
:return: None
'''
pass
def test_issue_1385(self):
'''
https://github.com/naparuba/shinken/issues/1385
'''
pass
def test_timeperiod_multiple_monday(self):
'''
Test with multiple monday
:return: None
'''
pass
| 20 | 18 | 30 | 3 | 20 | 8 | 1 | 0.41 | 1 | 4 | 1 | 0 | 19 | 0 | 19 | 74 | 588 | 79 | 375 | 110 | 355 | 153 | 306 | 110 | 286 | 3 | 2 | 1 | 22 |
4,005 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_unserialize.py
|
tests.test_unserialize.TestUnserialize
|
class TestUnserialize(AlignakTest):
"""
This class test the unserialize process
"""
def setUp(self):
super(TestUnserialize, self).setUp()
def test_unserialize_notif(self):
""" Test unserialize notifications
:return: None
"""
var = '''
{"98a76354619746fa8e6d2637a5ef94cb": {
"content": {
"reason_type": 1, "exit_status": 3, "creation_time":1468522950.2828259468,
"command_call": {
"args": [], "call": "notify-service",
"command": {
"command_line": "$USER1$\/notifier.pl
--hostname $HOSTNAME$
--servicedesc $SERVICEDESC$
--notificationtype $NOTIFICATIONTYPE$
--servicestate $SERVICESTATE$
--serviceoutput $SERVICEOUTPUT$
--longdatetime $LONGDATETIME$
--serviceattempt $SERVICEATTEMPT$
--servicestatetype $SERVICESTATETYPE$",
"command_name": "notify-service",
"configuration_errors":[],
"configuration_warnings":[],
"enable_environment_macros": false,
"id": "487aa432ddf646079ec6c07803333eac",
"imported_from": "cfg\/default\/commands.cfg:14",
"macros":{}, "module_type": "fork", "my_type":"command",
"ok_up":"", "poller_tag": "None",
"properties":{
"use":{
"brok_transformation": null,
"class_inherit": [],
"conf_send_preparation": null,
"default":[],
"fill_brok":[],
"has_default":true,
"help":"",
"keep_empty":false,
"managed":true,
"merging":"uniq",
"no_slots":false,
"override":false,
"required":false,
"retention":false,
"retention_preparation":null,
"special":false,
"split_on_coma":true,
"to_send":false,
"unmanaged":false,
"unused":false},
"name":{
"brok_transformation":null,
"class_inherit":[],
"conf_send_preparation":null,
"default":"",
"fill_brok":[],
"has_default":true,
"help":"",
"keep_empty":false,
"managed":true,
"merging":"uniq",
"no_slots":false,
"override":false,
"required":false,
"retention":false,
"retention_preparation":null,
"special":false,
"split_on_coma":true,
"to_send":false,
"unmanaged":false,
"unused":false},
},
"reactionner_tag":"None",
"running_properties":{
"configuration_errors":{
"brok_transformation":null,
"class_inherit":[],
"conf_send_preparation":null,
"default":[],"fill_brok":[],
"has_default":true,"help":"","keep_empty":false,
"managed":true,"merging":"uniq","no_slots":false,"override":false,
"required":false,"retention":false,"retention_preparation":null,
"special":false,"split_on_coma":true,"to_send":false,
"unmanaged":false,"unused":false},
},
"tags":[],
"timeout":-1,
"uuid":"487aa432ddf646079ec6c07803333eac"},
"enable_environment_macros":false,
"late_relink_done":false,
"macros":{},
"module_type":"fork",
"my_type":"CommandCall",
"poller_tag":"None",
"properties":{},
"reactionner_tag":"None",
"timeout":-1,
"uuid":"cfcaf0fc232b4f59a7d8bb5bd1d83fef",
"valid":true},
"escalated":false,
"reactionner_tag":"None",
"s_time":0.0,
"notification_type":0,
"contact_name":"test_contact",
"type":"PROBLEM",
"uuid":"98a76354619746fa8e6d2637a5ef94cb",
"check_time":0,"ack_data":"",
"state":0,"u_time":0.0,
"env":{
"NAGIOS_SERVICEDOWNTIME":"0",
"NAGIOS_TOTALSERVICESUNKNOWN":"",
"NAGIOS_LONGHOSTOUTPUT":"",
"NAGIOS_HOSTDURATIONSEC":"1468522950",
"NAGIOS_HOSTDISPLAYNAME":"test_host_0",
},
"notif_nb":1,"_in_timeout":false,"enable_environment_macros":false,
"host_name":"test_host_0",
"status":"scheduled",
"execution_time":0.0,"start_time":0,"worker":"none","t_to_go":1468522950,
"module_type":"fork","service_description":"test_ok_0","sched_id":0,"ack_author":"",
"ref":"272e89c1de854bad85987a7583e6c46b",
"is_a":"notification",
"contact":"4e7c4076c372457694684bdd5ba47e94",
"command":"\/notifier.pl --hostname test_host_0 --servicedesc test_ok_0
--notificationtype PROBLEM --servicestate CRITICAL
--serviceoutput CRITICAL --longdatetime Thu 14 Jul 21:02:30 CEST 2016
--serviceattempt 2 --servicestatetype HARD",
"end_time":0,"timeout":30,"output":"",
"already_start_escalations":[]},
"__sys_python_module__":"alignak.notification.Notification"
}
}
'''
unserialize(var)
assert True
def test_unserialize_check(self):
""" Test unserialize checks
:return: None
"""
var = '''
{"content":
{"check_type":0,"exit_status":3,"creation_time":1469152287.6731250286,
"reactionner_tag":"None","s_time":0.0,
"uuid":"5f1b16fa809c43379822c7acfe789660","check_time":0,"long_output":"",
"state":0,"internal":false,"u_time":0.0,"env":{},"depend_on_me":[],
"ref":"1fe5184ea05d439eb045399d26ed3337","from_trigger":false,
"status":"scheduled","execution_time":0.0,"worker":"none","t_to_go":1469152290,
"module_type":"echo","_in_timeout":false,"dependency_check":false,"type":"",
"depend_on":[],"is_a":"check","poller_tag":"None","command":"_echo",
"timeout":30,"output":"","perf_data":""},
"__sys_python_module__":"alignak.check.Check"
}
'''
unserialize(var)
assert True
|
class TestUnserialize(AlignakTest):
'''
This class test the unserialize process
'''
def setUp(self):
pass
def test_unserialize_notif(self):
''' Test unserialize notifications
:return: None
'''
pass
def test_unserialize_check(self):
''' Test unserialize checks
:return: None
'''
pass
| 4 | 3 | 53 | 1 | 50 | 2 | 1 | 0.06 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 58 | 166 | 5 | 152 | 6 | 148 | 9 | 11 | 6 | 7 | 1 | 2 | 0 | 3 |
4,006 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/xxx_test_modules_inner.py
|
tests.xxx_test_modules_inner.TestInnerModules
|
class TestInnerModules(AlignakTest):
"""
This class contains the tests for the Alignak inner defined modules
"""
def setUp(self):
super(TestInnerModules, self).setUp()
self.set_unit_tests_logger_level('INFO')
def test_module_inner_retention_legacy_cfg(self):
""" Test the inner retention module
Configured in Nagios cfg file
"""
self._module_inner_retention()
def test_module_inner_retention_alignak_ini(self):
""" Test the inner retention module
Configured in alignak.ini file
"""
self._module_inner_retention()
def _module_inner_retention(self, legacy_cfg=False):
""" Test the inner retention module
Module is self-created when a Nagios retention parameter is detected
This module implements the `load_retention` and `save_retention` scheduler hooks
:return:
"""
self.cfg_folder = '/tmp/alignak'
cfg_dir = 'default_many_hosts'
hosts_count = 2
realms = ['All']
# Default shipped configuration preparation
self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder)
# Specific daemon load configuration preparation
if os.path.exists('./cfg/%s/alignak.cfg' % cfg_dir):
shutil.copy('./cfg/%s/alignak.cfg' % cfg_dir, '%s/etc' % self.cfg_folder)
if os.path.exists('%s/etc/arbiter' % self.cfg_folder):
shutil.rmtree('%s/etc/arbiter' % self.cfg_folder)
shutil.copytree('./cfg/%s/arbiter' % cfg_dir, '%s/etc/arbiter' % self.cfg_folder)
self._prepare_hosts_configuration(cfg_folder='%s/etc/arbiter/objects/hosts'
% self.cfg_folder,
hosts_count=hosts_count, target_file_name='hosts.cfg',
realms=realms)
# Update the default configuration files
files = ['%s/etc/alignak.ini' % self.cfg_folder]
try:
cfg = configparser.ConfigParser()
cfg.read(files)
# Define Nagios state retention module configuration parameter
if not legacy_cfg:
cfg.set('alignak-configuration', 'retain_state_information', '1')
cfg.set('alignak-configuration', 'state_retention_file',
'%s/retention.json' % self.cfg_folder)
# # Define the inner retention module
# Not necessary to defined a odule but it may also be done!
# cfg.set('daemon.scheduler-master', 'modules', 'inner-retention')
#
# # Define Alignak inner module configuration
# cfg.add_section('module.inner-retention')
# cfg.set('module.inner-retention', 'name', 'inner-retention')
# cfg.set('module.inner-retention', 'type', 'retention')
# cfg.set('module.inner-retention', 'python_name', 'alignak.modules.retention')
with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
cfg.write(modified)
except Exception as exp:
print("* parsing error in config file: %s" % exp)
assert False
# # Define Nagios state retention module configuration parameter
if legacy_cfg:
with open('%s/etc/alignak.cfg' % self.cfg_folder, "a") as modified:
modified.write("retain_state_information=1\n\nstate_retention_file=/tmp/retention.json")
self.setup_with_file(env_file='%s/etc/alignak.ini' % self.cfg_folder,
dispatching=True)
assert self.conf_is_correct
self.show_configuration_logs()
# No scheduler modules created
modules = [m.module_alias for m in self._scheduler_daemon.modules]
assert modules == ['inner-retention']
modules = [m.name for m in self._scheduler_daemon.modules]
assert modules == ['inner-retention']
# Loading module logs
self.assert_any_log_match(re.escape(
u"Importing Python module 'alignak.modules.inner_retention' for inner-retention..."
))
self.assert_any_log_match(re.escape(
u"Imported 'alignak.modules.inner_retention' for inner-retention"
))
self.assert_any_log_match(re.escape(
u"Give an instance of alignak.modules.inner_retention for alias: inner-retention"
))
self.assert_any_log_match(re.escape(
u"I correctly loaded my modules: [inner-retention]"
))
# Load retention - file is not yet existing!
self.clear_logs()
self._scheduler.hook_point('load_retention')
self.show_logs()
# Save retention
self.clear_logs()
self._scheduler.hook_point('save_retention')
self.show_logs()
assert os.path.exists('/tmp/alignak/retention.json')
with open('/tmp/alignak/retention.json', "r") as fd:
response = json.load(fd)
# Load retention - file is now existing
self.clear_logs()
self._scheduler.hook_point('load_retention')
self.show_logs()
|
class TestInnerModules(AlignakTest):
'''
This class contains the tests for the Alignak inner defined modules
'''
def setUp(self):
pass
def test_module_inner_retention_legacy_cfg(self):
''' Test the inner retention module
Configured in Nagios cfg file
'''
pass
def test_module_inner_retention_alignak_ini(self):
''' Test the inner retention module
Configured in alignak.ini file
'''
pass
def _module_inner_retention(self, legacy_cfg=False):
''' Test the inner retention module
Module is self-created when a Nagios retention parameter is detected
This module implements the `load_retention` and `save_retention` scheduler hooks
:return:
'''
pass
| 5 | 4 | 30 | 5 | 18 | 8 | 2 | 0.46 | 1 | 3 | 0 | 0 | 4 | 1 | 4 | 59 | 128 | 24 | 71 | 16 | 66 | 33 | 58 | 13 | 53 | 6 | 2 | 2 | 9 |
4,007 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests_integ/alignak_test.py
|
tests_integ.alignak_test.AlignakTest
|
class AlignakTest(unittest2.TestCase):
if sys.version_info < (2, 7):
def assertRegex(self, *args, **kwargs):
return self.assertRegex(*args, **kwargs)
def setUp(self):
"""All tests initialization:
- output test identifier
- setup test logger
- track running Alignak daemons
- output system cpu/memory
"""
self.my_pid = os.getpid()
print("\n" + self.id())
print("-" * 80)
self._launch_dir = os.getcwd()
print("Test current working directory: %s" % self._launch_dir)
self._test_dir = os.path.dirname(__file__)
print("Test file directory: %s" % self._test_dir)
# Configure Alignak logger with test configuration
logger_configuration_file = os.path.join(self._test_dir, './etc/alignak-logger.json')
print("Logger configuration: %s" % logger_configuration_file)
# try:
# os.makedirs('/tmp/monitoring-log')
# except OSError as exp:
# pass
self.former_log_level = None
# Call with empty parameters to force log file truncation!
setup_logger(logger_configuration_file, log_dir=None, process_name='', log_file='')
self.logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
self.logger_.warning("Test: %s", self.id())
# To make sure that no running daemon exist
print("Checking Alignak running daemons...")
running_daemons = False
for daemon in ['broker', 'poller', 'reactionner', 'receiver', 'scheduler', 'arbiter']:
for proc in psutil.process_iter():
if 'alignak' in proc.name() and daemon in proc.name():
running_daemons = True
if running_daemons:
self._stop_alignak_daemons(arbiter_only=False)
# assert False, "*** Found a running Alignak daemon: %s" % (proc.name())
print("System information:")
perfdatas = []
cpu_count = psutil.cpu_count()
perfdatas.append("'cpu_count'=%d" % cpu_count)
cpu_percents = psutil.cpu_percent(percpu=True)
cpu = 1
for percent in cpu_percents:
perfdatas.append("'cpu_%d_percent'=%.2f%%" % (cpu, percent))
cpu += 1
print("-> cpu: %s" % " ".join(perfdatas))
perfdatas = []
virtual_memory = psutil.virtual_memory()
for key in virtual_memory._fields:
if 'percent' in key:
perfdatas.append("'mem_percent_used_%s'=%.2f%%"
% (key, getattr(virtual_memory, key)))
swap_memory = psutil.swap_memory()
for key in swap_memory._fields:
if 'percent' in key:
perfdatas.append("'swap_used_%s'=%.2f%%"
% (key, getattr(swap_memory, key)))
print("-> memory: %s" % " ".join(perfdatas))
print(("-" * 80) + "\n")
def tearDown(self):
"""Test ending:
- restore initial log level if it got changed
"""
# Clear Alignak unit tests log list
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
for handler in logger_.handlers:
if getattr(handler, '_name', None) == 'unit_tests':
print("Log handler %s, stored %d logs" % (handler._name, len(handler.collector)))
handler.collector = []
# Restore the collector logger log level
if self.former_log_level:
handler.level = self.former_log_level
break
def set_unit_tests_logger_level(self, log_level=logging.DEBUG):
"""Set the test logger at the provided level -
useful for some tests that check debug log
"""
# Change the logger and its handlers log level
print("Set unit_tests logger: %s" % log_level)
logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
logger_.setLevel(log_level)
for handler in logger_.handlers:
print("- handler: %s" % handler)
# handler.setLevel(log_level)
if getattr(handler, '_name', None) == 'unit_tests':
self.former_log_level = handler.level
handler.setLevel(log_level)
print("Unit tests handler is set at %s" % log_level)
break
def _prepare_hosts_configuration(self, cfg_folder, hosts_count=10,
target_file_name=None, realms=None):
"""Prepare the Alignak configuration
:return: the count of errors raised in the log files
"""
start = time.time()
if realms is None:
realms = ['All']
filename = cfg_folder + '/test-templates/host.tpl'
if os.path.exists(filename):
with open(filename, "r") as pattern_file:
host_pattern = pattern_file.read()
host_pattern = host_pattern.decode('utf-8')
else:
host_pattern = """
define host {
# Variable defined
use test-host
contact_groups admins
#hostgroups allhosts
host_name host-%s-%s
address 127.0.0.1
realm %s
}
"""
hosts = ""
hosts_set = 0
for realm in realms:
for index in range(hosts_count):
hosts = hosts + (host_pattern % (realm.lower(), index, realm)) + "\n"
hosts_set += 1
filename = os.path.join(cfg_folder, 'many_hosts_%d.cfg' % hosts_count)
if target_file_name is not None:
filename = os.path.join(cfg_folder, target_file_name)
if os.path.exists(filename):
os.remove(filename)
with open(filename, 'w') as outfile:
outfile.write(hosts)
print("Prepared a configuration with %d hosts, duration: %d seconds"
% (hosts_set, (time.time() - start)))
def _prepare_configuration(self, copy=True, cfg_folder='/tmp/alignak', daemons_list=None):
if daemons_list is None:
daemons_list = ['arbiter-master', 'scheduler-master', 'broker-master',
'poller-master', 'reactionner-master', 'receiver-master']
cfg_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), cfg_folder)
# Copy the default Alignak shipped configuration to the run directory
if copy:
print("Copy default configuration (../etc) to %s..." % cfg_folder)
if os.path.exists('%s/etc' % cfg_folder):
shutil.rmtree('%s/etc' % cfg_folder)
shutil.copytree(os.path.join(self._test_dir, '../etc'),
'%s/etc' % cfg_folder)
# Load and update the configuration
for f in ['alignak.log', 'alignak-events.log']:
if os.path.exists('%s/log/%s' % (cfg_folder, f)):
os.remove('%s/log/%s' % (cfg_folder, f))
# Clean the former existing pid and log files
print("Cleaning pid and log files...")
for daemon in daemons_list:
if os.path.exists('%s/run/%s.pid' % (cfg_folder, daemon)):
print("- removing pid %s/run/%s.pid" % (cfg_folder, daemon))
os.remove('%s/run/%s.pid' % (cfg_folder, daemon))
if os.path.exists('%s/log/%s.log' % (cfg_folder, daemon)):
print("- removing log %s/log/%s.log" % (cfg_folder, daemon))
os.remove('%s/log/%s.log' % (cfg_folder, daemon))
# Update monitoring configuration parameters
files = ['%s/etc/alignak.ini' % cfg_folder,
'%s/etc/alignak.d/daemons.ini' % cfg_folder,
'%s/etc/alignak.d/modules.ini' % cfg_folder]
# Update monitoring configuration file variables
try:
cfg = configparser.ConfigParser()
cfg.read(files)
# Configuration directories
cfg.set('DEFAULT', '_dist', cfg_folder)
# Do not set a specific bin directory to use the default Alignak one
cfg.set('DEFAULT', '_dist_BIN', '')
cfg.set('DEFAULT', '_dist_ETC', '%s/etc' % cfg_folder)
cfg.set('DEFAULT', '_dist_VAR', '%s/var' % cfg_folder)
cfg.set('DEFAULT', '_dist_RUN', '%s/run' % cfg_folder)
cfg.set('DEFAULT', '_dist_LOG', '%s/log' % cfg_folder)
# Logger configuration file
cfg.set('DEFAULT', 'logger_configuration', '%s/etc/alignak-logger.json' % cfg_folder)
# Nagios legacy files
cfg.set('alignak-configuration', 'cfg', '%s/etc/alignak.cfg' % cfg_folder)
# Directory for running daemons
cfg.set('alignak-configuration', 'daemons_script_location', '')
# Daemons launching and check
cfg.set('alignak-configuration', 'polling_interval', '1')
cfg.set('alignak-configuration', 'daemons_check_period', '1')
cfg.set('alignak-configuration', 'daemons_stop_timeout', '10')
cfg.set('alignak-configuration', 'daemons_start_timeout', '1')
cfg.set('alignak-configuration', 'daemons_new_conf_timeout', '1')
cfg.set('alignak-configuration', 'daemons_dispatch_timeout', '1')
# Poller/reactionner workers count limited to 1
cfg.set('alignak-configuration', 'min_workers', '1')
cfg.set('alignak-configuration', 'max_workers', '1')
with open('%s/etc/alignak.ini' % cfg_folder, "w") as modified:
cfg.write(modified)
except Exception as exp:
print("* parsing error in config file: %s" % exp)
assert False
def _files_update(self, files, replacements):
"""Update files content with the defined replacements
:param files: list of files to parse and replace
:param replacements: list of values to replace
:return:
"""
for filename in files:
lines = []
with open(filename) as infile:
for line in infile:
for src, target in replacements.items():
line = line.replace(src, target)
lines.append(line)
with open(filename, 'w') as outfile:
for line in lines:
outfile.write(line)
def _stop_alignak_daemons(self, arbiter_only=True, request_stop_uri=''):
    """ Stop the Alignak daemons started formerly

    If request_stop_uri is set, this function will try to stop the daemons with the
    /stop_request API, else it will directly send a kill signal.

    If some alignak- daemons are still running after the kill, force kill them.

    :param arbiter_only: only stop the 'arbiter-master' process from self.procs
        (the arbiter is expected to stop its own satellites)
    :type arbiter_only: bool
    :param request_stop_uri: base URI of the arbiter HTTP interface; when set,
        stop through the /stop_request endpoint instead of killing processes
    :type request_stop_uri: str
    :return: None
    """
    print("Stopping the daemons...")
    start = time.time()

    if request_stop_uri:
        # Soft stop: ask the arbiter to stop everything through its HTTP API
        req = requests.Session()
        raw_data = req.get("%s/stop_request?stop_now=1" % request_stop_uri)
        data = raw_data.json()

        # Let the process 20 seconds to exit
        time.sleep(20)

        # Check whether any alignak daemon process is still in 'running' state
        no_daemons = True
        for daemon in ['broker', 'poller', 'reactionner', 'receiver', 'scheduler', 'arbiter']:
            for proc in psutil.process_iter():
                try:
                    if daemon not in proc.name():
                        continue
                    # Never consider the test process itself (my_pid may not be set)
                    if getattr(self, 'my_pid', None) and proc.pid == self.my_pid:
                        continue

                    print("- ***** remaining %s / %s" % (proc.name(), proc.status()))
                    if proc.status() == 'running':
                        no_daemons = False
                except psutil.NoSuchProcess:
                    # Process vanished between iteration and inspection
                    print("not existing!")
                    continue
                except psutil.TimeoutExpired:
                    print("***** timeout 10 seconds, force-killing the daemon...")
        # Do not assert because some processes are sometimes zombies that are
        # removed by the Python GC
        # assert no_daemons
        return

    # Hard stop: terminate the processes we started ourselves (self.procs)
    if getattr(self, 'procs', None):
        for name, proc in list(self.procs.items()):
            if arbiter_only and name not in ['arbiter-master']:
                continue
            if getattr(self, 'my_pid', None) and proc.pid == self.my_pid:
                print("- do not kill myself!")
                continue

            print("Asking %s (pid=%d) to end..." % (name, proc.pid))
            try:
                daemon_process = psutil.Process(proc.pid)
            except psutil.NoSuchProcess:
                print("not existing!")
                continue
            # children = daemon_process.children(recursive=True)
            daemon_process.terminate()
            try:
                # The default arbiter / daemons stopping process is 30 seconds graceful ... so
                # not really compatible with this default delay. The test must update the
                # default delay or set a shorter delay than the default one
                daemon_process.wait(10)
            except psutil.TimeoutExpired:
                print("***** stopping timeout 10 seconds, force-killing the daemon...")
                daemon_process.kill()
            except psutil.NoSuchProcess:
                print("not existing!")
                pass
            print("%s terminated" % (name))
        print("Stopping daemons duration: %d seconds" % (time.time() - start))

    time.sleep(1.0)

    # Safety net: force-kill any alignak daemon process that survived
    print("Killing remaining processes...")
    for daemon in ['broker', 'poller', 'reactionner', 'receiver', 'scheduler', 'arbiter']:
        for proc in psutil.process_iter():
            try:
                if daemon not in proc.name():
                    continue
                if getattr(self, 'my_pid', None) and proc.pid == self.my_pid:
                    continue

                print("- killing %s" % (proc.name()))
                daemon_process = psutil.Process(proc.pid)
                daemon_process.terminate()
                daemon_process.wait(10)
            except psutil.AccessDenied:
                # Not one of ours (e.g. a system process matching the name)
                print("-> access denied...")
                continue
            except psutil.NoSuchProcess:
                print("-> not existing!")
                continue
            except psutil.TimeoutExpired:
                print("-> timeout 10 seconds, force-killing the daemon...")
                daemon_process.kill()
def _run_command_with_timeout(self, cmd, timeout_sec):
"""Execute `cmd` in a subprocess and enforce timeout `timeout_sec` seconds.
Return subprocess exit code on natural completion of the subprocess.
Returns None if timeout expires before subprocess completes."""
start = time.time()
proc = subprocess.Popen(cmd)
print("%s launched (pid=%d)" % (cmd, proc.pid))
timer = threading.Timer(timeout_sec, proc.kill)
timer.start()
proc.communicate()
if timer.is_alive():
# Process completed naturally - cancel timer and return exit code
timer.cancel()
print("-> exited with %s after %.2d seconds" % (proc.returncode, time.time() - start))
return proc.returncode
# Process killed by timer - raise exception
print('Process #%d killed after %f seconds' % (proc.pid, timeout_sec))
return None
def _run_alignak_daemons(self, cfg_folder='/tmp/alignak', runtime=30,
                         daemons_list=None, spare_daemons=[], piped=False, run_folder='',
                         arbiter_only=True, update_configuration=True, verbose=False):
    """ Run the Alignak daemons for a passive configuration

    Let the daemons run for the number of seconds defined in the runtime parameter and
    then kill the required daemons (list in the spare_daemons parameter)

    Check that the run daemons did not raised any ERROR log

    :param cfg_folder: folder containing the test configuration (relative to the test dir)
    :param runtime: seconds to let the daemons run before returning
    :param daemons_list: daemons to consider (defaults to the full master daemons set)
    :param spare_daemons: kept for API compatibility - not used in this function
    :param piped: capture the daemons stdout/stderr in pipes
    :param run_folder: folder for the daemons run/log files (defaults to cfg_folder)
    :param arbiter_only: only launch the arbiter daemon process
    :param update_configuration: rewrite alignak.ini with test-friendly values
    :param verbose: launch the daemons in debug mode
    :return: None
    """
    if daemons_list is None:
        daemons_list = [
            'scheduler-master', 'broker-master',
            'poller-master', 'reactionner-master', 'receiver-master'
        ]
    # Load and test the configuration
    cfg_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), cfg_folder)
    if not run_folder:
        run_folder = cfg_folder
    print("%s - Running Alignak daemons, cfg_folder: %s, run_folder: %s"
          % (datetime.datetime.now(), cfg_folder, run_folder))

    # Remove the former global log files
    for f in ['alignak.log', 'alignak-events.log']:
        if os.path.exists('%s/log/%s' % (cfg_folder, f)):
            os.remove('%s/log/%s' % (cfg_folder, f))

    # Clean the former existing pid and log files
    print("Cleaning pid and log files...")
    for daemon in daemons_list + ['arbiter-master']:
        if os.path.exists('%s/%s.pid' % (self._launch_dir, daemon)):
            print("- removing pid %s/%s.pid" % (self._launch_dir, daemon))
            os.remove('%s/%s.pid' % (self._launch_dir, daemon))
        if os.path.exists('%s/run/%s.pid' % (run_folder, daemon)):
            print("- removing pid %s/run/%s.pid" % (run_folder, daemon))
            os.remove('%s/run/%s.pid' % (run_folder, daemon))
        if os.path.exists('%s/%s.log' % (self._launch_dir, daemon)):
            print("- removing log %s/%s.log" % (self._launch_dir, daemon))
            os.remove('%s/%s.log' % (self._launch_dir, daemon))
        if os.path.exists('%s/log/%s.log' % (run_folder, daemon)):
            print("- removing log %s/log/%s.log" % (run_folder, daemon))
            os.remove('%s/log/%s.log' % (run_folder, daemon))

    # Update monitoring configuration parameters
    if update_configuration:
        files = ['%s/etc/alignak.ini' % cfg_folder,
                 '%s/etc/alignak.d/daemons.ini' % cfg_folder,
                 '%s/etc/alignak.d/modules.ini' % cfg_folder]
        # Update monitoring configuration file variables
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)
            # Configuration directories
            cfg.set('DEFAULT', '_dist', cfg_folder)
            # Do not set a specific bin directory to use the default Alignak one
            cfg.set('DEFAULT', '_dist_BIN', '')
            cfg.set('DEFAULT', '_dist_ETC', '%s/etc' % cfg_folder)
            cfg.set('DEFAULT', '_dist_VAR', '%s/var' % run_folder)
            cfg.set('DEFAULT', '_dist_RUN', '%s/run' % run_folder)
            cfg.set('DEFAULT', '_dist_LOG', '%s/log' % run_folder)
            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%s/etc/alignak.cfg' % cfg_folder)
            # Directory for running daemons
            cfg.set('alignak-configuration', 'daemons_script_location', '')
            # Daemons launching and check
            cfg.set('alignak-configuration', 'polling_interval', '1')
            cfg.set('alignak-configuration', 'daemons_check_period', '1')
            cfg.set('alignak-configuration', 'daemons_stop_timeout', '10')
            cfg.set('alignak-configuration', 'daemons_start_timeout', '5')
            cfg.set('alignak-configuration', 'daemons_new_conf_timeout', '1')
            cfg.set('alignak-configuration', 'daemons_dispatch_timeout', '1')
            # Poller/reactionner workers count limited to 1
            cfg.set('alignak-configuration', 'min_workers', '1')
            cfg.set('alignak-configuration', 'max_workers', '1')

            with open('%s/etc/alignak.ini' % cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

    # If some Alignak daemons are still running...
    self._stop_alignak_daemons()

    # # # Some script commands may exist in the test folder ...
    # if os.path.exists(cfg_folder + '/dummy_command.sh'):
    #     shutil.copy(cfg_folder + '/dummy_command.sh', '/tmp/dummy_command.sh')
    #
    print("%s - Launching the daemons..." % datetime.datetime.now())
    self.procs = {}
    for name in daemons_list + ['arbiter-master']:
        if arbiter_only and name not in ['arbiter-master']:
            continue
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_%s.py" % name.split('-')[0]),
            "-n", name, "-e", "%s/etc/alignak.ini" % cfg_folder
        ]
        if verbose:
            args.append("--debug")
        print("- %s arguments: %s" % (name, args))
        if piped:
            # BUG FIX: the former "- capturing stdout/stderr" % name raised a
            # TypeError (no conversion specifier in the format string)
            print("- capturing stdout/stderr for %s" % name)
            self.procs[name] = \
                subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            self.procs[name] = subprocess.Popen(args)
        time.sleep(0.1)
        print("- %s launched (pid=%d)" % (name, self.procs[name].pid))

    time.sleep(3)

    print("Testing daemons start")
    for name, proc in list(self.procs.items()):
        ret = proc.poll()
        if ret is not None:
            print("*** %s exited on start!" % name)
            # Dump all the available logs to help diagnose the failure
            if os.path.exists("/tmp/alignak.log"):
                with open("/tmp/alignak.log") as f:
                    for line in f:
                        print("xxx %s" % line[:-1])
            if os.path.exists("%s/log/arbiter-master.log" % cfg_folder):
                with open("%s/log/arbiter-master.log" % cfg_folder) as f:
                    for line in f:
                        print("... %s" % line[:-1])
            if proc.stdout:
                for line in iter(proc.stdout.readline, b''):
                    print(">>> " + str(line).rstrip())
            else:
                print("No stdout!")
            if proc.stderr:
                for line in iter(proc.stderr.readline, b''):
                    print(">>> " + str(line).rstrip())
            else:
                print("No stderr!")
        assert ret is None, "Daemon %s not started!" % name
        print("%s running (pid=%d)" % (name, self.procs[name].pid))

    # Let the daemons start...
    time.sleep(3)

    print("%s - Testing pid files and log files..." % datetime.datetime.now())
    for name in daemons_list + ['arbiter-master']:
        if arbiter_only and name not in ['arbiter-master']:
            continue
        print("- %s for %s" % ('%s/run/%s.pid' % (run_folder, name), name))
        # Some times pid and log files may not exist ...
        if not os.path.exists('%s/run/%s.pid' % (run_folder, name)):
            print('%s/run/%s.pid does not exist!' % (run_folder, name))
        print("- %s for %s" % ('%s/log/%s.log' % (run_folder, name), name))
        if not os.path.exists('%s/log/%s.log' % (run_folder, name)):
            print('%s/log/%s.log does not exist!' % (run_folder, name))

    time.sleep(1)

    # Let the arbiter build and dispatch its configuration
    # Let the schedulers get their configuration and run the first checks
    time.sleep(runtime)
    print("%s - after sleeping..." % datetime.datetime.now())
def _check_daemons_log_for_errors(self, daemons_list, run_folder='/tmp/alignak',
                                  ignored_warnings=None, ignored_errors=None, dump_all=True):
    """
    Check that the daemons all started correctly and that they got their configuration
    ignored_warnings and ignored_errors are lists of strings that make a WARNING or ERROR log
    not to be considered as a warning or error

    :param daemons_list: daemon names whose log files must be parsed
        (the 'arbiter-master' log is always parsed first)
    :param run_folder: folder where the daemons log/run files are searched
    :param ignored_warnings: WARNING log substrings to ignore
    :param ignored_errors: ERROR/CRITICAL log substrings to ignore
    :param dump_all: print every matching log line, not only the retained ones
    :return: tuple (number of errors, number of warnings)
    """
    print("Get information from log files...")
    travis_run = 'TRAVIS' in os.environ
    if ignored_errors is None:
        ignored_errors = []
    if ignored_warnings is None:
        ignored_warnings = []
    # NOTE(review): extend() mutates the caller-provided list when one is
    # given - the ignored patterns accumulate across calls; confirm intended.
    ignored_warnings.extend([
        u'Cannot call the additional groups setting ',
        u'loop exceeded the maximum expected',
        u'ignoring repeated file'
    ])
    nb_errors = 0
    nb_warnings = 0
    for daemon in ['arbiter-master'] + daemons_list:
        # Search the daemon log file: run_folder/log, then run_folder/run,
        # then fall back to the launch directory (must exist there)
        log_file = "/%s/log/%s.log" % (run_folder, daemon)
        if not os.path.exists(log_file):
            log_file = "/%s/run/%s.log" % (run_folder, daemon)
            if not os.path.exists(log_file):
                assert os.path.exists("%s/%s.log" % (self._launch_dir, daemon)), '%s/%s.log does not exist!' % (self._launch_dir, daemon)
                log_file = "%s/%s.log" % (self._launch_dir, daemon)
        daemon_errors = False
        print("-----\n%s log file: %s\n-----\n" % (daemon,
                                                   '/%s/log/%s.log' % (run_folder, daemon)))
        with open(log_file) as f:
            for line in f:
                # Once an error was seen for this daemon (daemon_errors True),
                # every following line goes through the warning filter too
                if 'WARNING: ' in line or daemon_errors:
                    if dump_all and not travis_run:
                        print(line[:-1])
                    for ignore_line in ignored_warnings:
                        if ignore_line in line:
                            break
                    else:
                        # for/else: no ignore pattern matched -> real warning
                        nb_warnings += 1
                        print("-W-" + line[:-1])
                if 'ERROR: ' in line or 'CRITICAL: ' in line:
                    if dump_all and not daemon_errors:
                        print(line[:-1])
                    for ignore_line in ignored_errors:
                        if ignore_line in line:
                            break
                    else:
                        # for/else: no ignore pattern matched -> real error
                        nb_errors += 1
                        print("*E*" + line[:-1])
                    if nb_errors > 0:
                        daemon_errors = True

    return (nb_errors, nb_warnings)
def setup_with_file(self, configuration_file=None, env_file=None,
                    verbose=False, unit_test=True, dispatching=False):
    """
    Load alignak with the provided configuration and environment files

    If verbose is True the environment loading is printed out on the console.

    If the configuration loading fails, a SystemExit exception is raised to the caller.

    The conf_is_correct property indicates if the configuration loading succeeded or failed.

    The configuration errors property contains a list of the error message that are normally
    logged as ERROR by the arbiter.

    If unit_test is True it will simulate the dispatcher configuration sending
    to the declared satellites in the configuration. Set to False if you intend to run
    real daemons that will receive their configuration!

    :param unit_test: set to False for integration tests
    :type unit_test: bool
    :param configuration_file: path + file name of the main configuration file
    :type configuration_file: str
    :param env_file: path + file name of the alignak environment file
    :type env_file: str
    :param verbose: load Alignak environment in verbose mode (defaults True)
    :type verbose: bool
    :param dispatching: simulate the dispatch of the parsed configuration
    :type dispatching: bool
    :return: None
    """
    self.broks = []

    # Our own satellites lists ...
    self.arbiters = {}
    self.schedulers = {}
    self.brokers = {}
    self.pollers = {}
    self.receivers = {}
    self.reactionners = {}

    # Our own schedulers lists ...
    # Indexed on the scheduler name
    self._schedulers = {}

    # The main arbiter and scheduler daemons
    self._arbiter = None
    self._scheduler_daemon = None
    self._scheduler = None
    self.conf_is_correct = False
    self.configuration_warnings = []
    self.configuration_errors = []

    # At least one of the configuration sources must be provided
    assert configuration_file or env_file

    # current_dir = os.getcwd()
    print("Current directory: %s" % self._test_dir)
    if configuration_file:
        # Resolve the legacy configuration file (absolute or relative to the test dir)
        if not os.path.exists(configuration_file):
            configuration_file = os.path.join(self._test_dir, configuration_file)
            if not os.path.exists(configuration_file):
                # Error for the configuration file
                print("Provided configuration file for the test does not exist anywhere: %s!"
                      % configuration_file)
                raise SystemExit("No configuration file found for the test!")
        configuration_dir = os.path.dirname(configuration_file)
        print("Test configuration directory: %s, file: %s"
              % (os.path.abspath(configuration_dir), configuration_file))
    else:
        # Resolve the environment file (absolute or relative to the test dir)
        if not os.path.exists(env_file):
            env_file = os.path.join(self._test_dir, env_file)
            if not os.path.exists(env_file):
                # Error for the configuration file
                print("Provided configuration file for the test does not exist anywhere: %s!"
                      % configuration_file)
                raise SystemExit("No configuration file found for the test!")
        configuration_dir = os.path.dirname(env_file)
        print("Test configuration directory: %s, file: %s"
              % (os.path.abspath(configuration_dir), env_file))

    self.env_filename = env_file
    if env_file is None:
        # No environment file: search a default alignak.ini near the legacy
        # configuration, then in the test/etc directory
        self.env_filename = os.path.join(configuration_dir, 'alignak.ini')
        if os.path.exists(os.path.join(configuration_dir, 'alignak.ini')):
            # alignak.ini in the same directory as the legacy configuration file
            self.env_filename = os.path.join(configuration_dir, 'alignak.ini')
        elif os.path.exists(os.path.join(self._test_dir, './etc/alignak.ini')):
            # alignak.ini in the test/etc directory
            self.env_filename = os.path.join(self._test_dir, './etc/alignak.ini')
        else:
            print("No Alignak configuration file found for the test: %s!" % self.env_filename)
            raise SystemExit("No Alignak configuration file found for the test!")

    if self.env_filename != os.path.abspath(self.env_filename):
        self.env_filename = os.path.join(self._test_dir, self.env_filename)
    self.env_filename = os.path.abspath(self.env_filename)
    print("Found Alignak environment file: %s" % self.env_filename)

    # Get Alignak environment
    self.alignak_env = AlignakConfigParser({
        '<cfg_file>': self.env_filename, '--verbose': verbose})
    self.alignak_env.parse()

    # Find the arbiter daemon section name in the environment
    arbiter_cfg = None
    for daemon_section, daemon_cfg in list(self.alignak_env.get_daemons().items()):
        if daemon_cfg['type'] == 'arbiter':
            arbiter_cfg = daemon_cfg

    arbiter_name = 'Default-Arbiter'
    if arbiter_cfg:
        arbiter_name = arbiter_cfg['name']

    # Using default values that are usually provided by the command line parameters
    args = {
        'alignak_name': 'alignak-test', 'daemon_name': arbiter_name,
        'env_file': self.env_filename
    }
    if configuration_file:
        args.update({'legacy_cfg_files': [configuration_file]})
    self._arbiter = Arbiter(**args)

    try:
        # Configure the logger
        # self._arbiter.debug = True
        self._arbiter.setup_alignak_logger()

        # # Setup our modules manager
        # self._arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        self._arbiter.load_monitoring_config_file(clean=False)

        # If this assertion does not match, then there is a bug in the arbiter :)
        self.assertTrue(self._arbiter.conf.conf_is_correct)
        self.conf_is_correct = True
        self.configuration_warnings = self._arbiter.conf.configuration_warnings
        self.configuration_errors = self._arbiter.conf.configuration_errors
    except SystemExit:
        # Keep the warnings/errors available to the test and re-raise
        self.configuration_warnings = self._arbiter.conf.configuration_warnings
        self.configuration_errors = self._arbiter.conf.configuration_errors
        self.show_configuration_logs()
        self.show_logs()
        raise

    # Prepare the configuration dispatching
    for arbiter_link in self._arbiter.conf.arbiters:
        if arbiter_link.get_name() == self._arbiter.arbiter_name:
            self._arbiter.link_to_myself = arbiter_link
    # NOTE(review): arbiter_link is the loop variable; with an empty arbiters
    # list this raises NameError, not the expected AssertionError
    assert arbiter_link is not None, "There is no arbiter link in the configuration!"

    if not unit_test:
        print("No unit testing, return...")
        return

    if not dispatching:
        print("No dispatching, return...")
        return

    # Prepare the configuration dispatching
    self._arbiter.dispatcher = Dispatcher(self._arbiter.conf, self._arbiter.link_to_myself)
    self._arbiter.dispatcher.prepare_dispatch()

    # Create an Arbiter external commands manager in dispatcher mode
    self._arbiter.external_commands_manager = ExternalCommandManager(self._arbiter.conf,
                                                                     'dispatcher',
                                                                     self._arbiter,
                                                                     accept_unknown=True)

    print("All daemons address: %s" % ["%s:%s" % (link.address, link.port) for link in self._arbiter.dispatcher.all_daemons_links])
    # Simulate the daemons HTTP interface (very simple simulation !)
    with requests_mock.mock() as mr:
        for link in self._arbiter.dispatcher.all_daemons_links:
            # mr.get('http://%s:%s/ping' % (link.address, link.port), json='pong')
            mr.get('http://%s:%s/identity' % (link.address, link.port),
                   json={"start_time": 0, "running_id": 123456.123456})
            mr.get('http://%s:%s/_wait_new_conf' % (link.address, link.port), json=True)
            mr.post('http://%s:%s/_push_configuration' % (link.address, link.port), json=True)
            mr.get('http://%s:%s/_initial_broks' % (link.address, link.port), json=[])
            mr.get('http://%s:%s/managed_configurations' % (link.address, link.port), json={})

        self._arbiter.dispatcher.check_reachable(test=True)
        # self._arbiter.dispatcher.dispatch(test=True)
        self._arbiter.dispatcher.check_dispatch()
        print("-----\nConfiguration got dispatched.")

        # Check that all the daemons links got a configuration
        for sat_type in ('arbiters', 'schedulers', 'reactionners',
                         'brokers', 'receivers', 'pollers'):
            if verbose:
                print("- for %s:" % sat_type)
            for sat_link in getattr(self._arbiter.dispatcher, sat_type):
                if verbose:
                    print(" - %s" % sat_link)
                pushed_configuration = getattr(sat_link, 'unit_test_pushed_configuration', None)
                if pushed_configuration:
                    if verbose:
                        print("   pushed configuration, contains:")
                        for key in pushed_configuration:
                            print("   . %s = %s" % (key, pushed_configuration[key]))
                # Update the test class satellites lists
                getattr(self, sat_type).update({sat_link.name: pushed_configuration})
            if verbose:
                print("- my %s: %s" % (sat_type, list(getattr(self, sat_type).keys())))

        self.eca = None
        # Initialize a Scheduler daemon
        for scheduler in self._arbiter.dispatcher.schedulers:
            print("-----\nGot a scheduler: %s (%s)" % (scheduler.name, scheduler))
            # Simulate the scheduler daemon start
            args = {
                'env_file': self.env_filename, 'daemon_name': scheduler.name,
            }
            self._scheduler_daemon = Alignak(**args)
            # self._scheduler_daemon.load_modules_manager()

            # Simulate the scheduler daemon receiving the configuration from its arbiter
            pushed_configuration = scheduler.unit_test_pushed_configuration
            self._scheduler_daemon.new_conf = pushed_configuration
            self._scheduler_daemon.setup_new_conf()
            assert self._scheduler_daemon.new_conf == {}
            self._schedulers[scheduler.name] = self._scheduler_daemon.sched

        # Store the last scheduler object to get used in some other functions!
        # this is the real scheduler, not the scheduler daemon!
        self._scheduler = self._scheduler_daemon.sched
        self._scheduler.my_daemon = self._scheduler_daemon
        print("Got a default scheduler: %s\n-----" % self._scheduler)

        # Initialize a Broker daemon
        for broker in self._arbiter.dispatcher.brokers:
            print("-----\nGot a broker: %s (%s)" % (broker.name, broker))
            # Simulate the broker daemon start
            args = {
                'env_file': self.env_filename, 'daemon_name': broker.name,
            }
            self._broker_daemon = Broker(**args)
            # self._broker_daemon.load_modules_manager()

            # Simulate the scheduler daemon receiving the configuration from its arbiter
            pushed_configuration = broker.unit_test_pushed_configuration
            self._broker_daemon.new_conf = pushed_configuration
            self._broker_daemon.setup_new_conf()
            assert self._broker_daemon.new_conf == {}
            print("Got a default broker daemon: %s\n-----" % self._broker_daemon)

        # Get my first broker link
        self._main_broker = None
        if self._scheduler.my_daemon.brokers:
            self._main_broker = [b for b in list(self._scheduler.my_daemon.brokers.values())][0]
        print("Main broker: %s" % self._main_broker)

        # Initialize a Receiver daemon
        self._receiver = None
        for receiver in self._arbiter.dispatcher.receivers:
            print("-----\nGot a receiver: %s (%s)" % (receiver.name, receiver))
            # Simulate the receiver daemon start
            args = {
                'env_file': self.env_filename, 'daemon_name': receiver.name,
            }
            self._receiver_daemon = Receiver(**args)
            # self._receiver_daemon.load_modules_manager()

            # Simulate the scheduler daemon receiving the configuration from its arbiter
            pushed_configuration = receiver.unit_test_pushed_configuration
            self._receiver_daemon.new_conf = pushed_configuration
            self._receiver_daemon.setup_new_conf()
            assert self._receiver_daemon.new_conf == {}
            self._receiver = receiver
            print("Got a default receiver: %s\n-----" % self._receiver)

            # for scheduler in self._receiver_daemon.schedulers.values():
            #     scheduler.my_daemon = self._receiver_daemon

        self.ecm_mode = 'applyer'

        # Now we create an external commands manager in receiver mode
        self.ecr = None
        if self._receiver:
            self.ecr = ExternalCommandManager(None, 'receiver', self._receiver_daemon,
                                              accept_unknown=True)
            self._receiver.external_commands_manager = self.ecr

        # and an external commands manager in dispatcher mode for the arbiter
        self.ecd = ExternalCommandManager(self._arbiter.conf, 'dispatcher', self._arbiter,
                                          accept_unknown=True)

        # Stop the daemon modules - the unit test does not need them running
        self._arbiter.modules_manager.stop_all()
        self._broker_daemon.modules_manager.stop_all()
        self._scheduler_daemon.modules_manager.stop_all()
        if self._receiver:
            self._receiver_daemon.modules_manager.stop_all()
def fake_check(self, ref, exit_status, output="OK"):
    """
    Simulate a check execution and result
    :param ref: host/service concerned by the check
    :param exit_status: check exit status code (0, 1, ...).
    If set to None, the check is simply scheduled but not "executed"
    :param output: check output (output + perf data)
    :return:
    """
    now = time.time()
    scheduler = self._scheduler

    # Force the scheduling of a new check for the reference item
    check = ref.schedule(scheduler.hosts, scheduler.services, scheduler.timeperiods,
                         scheduler.macromodulations, scheduler.checkmodulations,
                         scheduler.checks, force=True, force_time=None)
    # The check is now scheduled: push it into the scheduler checks
    scheduler.add(check)

    # A None exit status only schedules the check without faking its execution.
    # Useful for manual business rules rescheduling, for instance.
    if exit_status is None:
        return

    # Fake the check execution
    check.check_time = now
    # Lie about when we launched it: otherwise the schedule call for ref
    # would not really reschedule it because a valid value exists in the future
    ref.next_chk = now - 0.5

    # Max plugin output is default to 8192
    check.get_outputs(output, 8192)
    check.exit_status = exit_status
    check.execution_time = 0.001
    check.status = 'waitconsume'

    # Hand the result over to the scheduler waiting results queue
    scheduler.waiting_results.put(check)
def scheduler_loop(self, count, items=None, scheduler=None):
    """
    Manage scheduler actions

    Run `count` turns of the scheduler recurrent works. For each provided item,
    fake a check result with the given exit status and output before fetching
    the results.

    :param count: number of loop turns to run
    :type count: int
    :param items: list of list [[object, exist_status, output]]
    :type items: list
    :param scheduler: The scheduler
    :type scheduler: None | alignak.daemons.SchedulerDaemon
    :return: None
    """
    if scheduler is None:
        scheduler = self._scheduler
    assert scheduler is not None

    if items is None:
        items = []

    # The macro resolver needs the scheduler pushed configuration
    macroresolver = MacroResolver()
    macroresolver.init(scheduler.my_daemon.sched.pushed_conf)

    for num in range(count):
        # print("Scheduler loop turn: %s" % num)
        for (item, exit_status, output) in items:
            print("- item checks creation turn: %s" % item)
            if len(item.checks_in_progress) == 0:
                # A first full scheduler loop turn to create the checks
                # if they do not yet exist!
                for i in scheduler.recurrent_works:
                    (name, fun, nb_ticks) = scheduler.recurrent_works[i]
                    # Only run the every-tick recurrent works
                    if nb_ticks == 1:
                        try:
                            # print(" . %s ...running." % name)
                            fun()
                        except Exception as exp:
                            print("Exception: %s\n%s" % (exp, traceback.format_exc()))
                    # else:
                    #     print(" . %s ...ignoring, period: %d" % (name, nb_ticks))
            else:
                print("*** check is still in progress for %s!" % (item.get_full_name()))

            self.assertGreater(len(item.checks_in_progress), 0)
            # Fake the result of the first in-progress check
            chk = scheduler.checks[item.checks_in_progress[0]]
            chk.set_type_active()
            chk.check_time = time.time()
            chk.wait_time = 0.0001
            chk.last_poll = chk.check_time
            chk.output = output
            chk.exit_status = exit_status
            scheduler.waiting_results.put(chk)

        # print("-----\n- results fetching turn:")
        for i in scheduler.recurrent_works:
            (name, fun, nb_ticks) = scheduler.recurrent_works[i]
            # Only run the every-tick recurrent works
            if nb_ticks == 1:
                try:
                    # print(" . %s ...running." % name)
                    fun()
                except Exception as exp:
                    print("Exception: %s\n%s" % (exp, traceback.format_exc()))
                    assert False
            # else:
            #     print(" . %s ...ignoring, period: %d" % (name, nb_ticks))
        self.assert_no_log_match("External command Brok could not be sent to any daemon!")
def manage_freshness_check(self, count=1, mysched=None):
    """Run the scheduler loop for freshness_check

    :param count: number of scheduler loop turns
    :type count: int
    :param mysched: a specific scheduler to get used
    :type mysched: None | object
    :return: number of freshness expiry checks raised on the last turn
    """
    expired = []
    for _ in range(count):
        for key in self._scheduler.recurrent_works:
            name, fun, nb_ticks = self._scheduler.recurrent_works[key]
            # Only run the every-tick recurrent works
            if nb_ticks == 1:
                fun()
            if name == 'check_freshness':
                # Collect the checks raised by the freshness check, oldest first
                ordered = sorted(self._scheduler.checks.values(),
                                 key=lambda chk: chk.creation_time)
                expired = [chk for chk in ordered if chk.freshness_expiry_check]
    return len(expired)
def manage_external_command(self, external_command, run=True):
    """Manage an external command.

    The handling depends upon the current `self.ecm_mode`:
    - 'applyer': run the command on the test scheduler and execute a loop turn
    - 'dispatcher': resolve with the arbiter ECM and push to the schedulers
    - 'receiver': resolve with the receiver ECM and forward its broks to the broker

    :param external_command: the raw external command string
    :param run: when resolved (dispatcher/receiver modes), also push/execute it
    :return: result of external command resolution
    """
    res = None
    ext_cmd = ExternalCommand(external_command)
    if self.ecm_mode == 'applyer':
        res = None
        self._scheduler.run_external_commands([external_command])
        self.external_command_loop()
    if self.ecm_mode == 'dispatcher':
        res = self.ecd.resolve_command(ext_cmd)
        if res and run:
            self._arbiter.broks = []
            self._arbiter.add(ext_cmd)
            self._arbiter.push_external_commands_to_schedulers()
    if self.ecm_mode == 'receiver':
        res = self.ecr.resolve_command(ext_cmd)
        if res and run:
            self._receiver_daemon.broks = []
            self._receiver_daemon.add(ext_cmd)
            # self._receiver_daemon.push_external_commands_to_schedulers()
            # # Our scheduler
            # self._scheduler = self.schedulers['scheduler-master'].sched
            # Give broks to our broker
            for brok in self._receiver_daemon.broks:
                print("Brok receiver: %s" % brok)
                self._broker_daemon.external_broks.append(brok)
    return res
def external_command_loop(self, count=1):
    """Execute the scheduler actions for external commands.

    The scheduler is not an ECM 'dispatcher' but an 'applyer' ... so this function is on
    the external command execution side of the problem.

    :param count: number of scheduler loop turns to run
    :return:
    """
    # Running scheduler loop turns makes the scheduler consume pending commands
    self.scheduler_loop(count)
def worker_loop(self, verbose=True):
    """Simulate a poller/reactionner worker loop turn.

    Removes the zombie checks/actions and then acknowledges every launchable
    action with a fake successful result.
    """
    scheduler = self._scheduler
    scheduler.delete_zombie_checks()
    scheduler.delete_zombie_actions()
    checks = scheduler.get_to_run_checks(True, False, worker_name='tester')
    actions = scheduler.get_to_run_checks(False, True, worker_name='tester')
    if verbose is True:
        self.show_actions()
    for action in actions:
        # Fake a successful execution for each action
        action.status = u'in_poller'
        action.check_time = time.time()
        action.exit_status = 0
        scheduler.put_results(action)
    if verbose is True:
        self.show_actions()
def launch_internal_check(self, svc_br):
    """ Launch an internal check for the business rule service provided """
    now = time.time()
    scheduler = self._scheduler

    # Ask the service to build its internal check and queue it in the scheduler
    scheduler.add(svc_br.launch_check(now - 1, scheduler.hosts, scheduler.services,
                                      scheduler.timeperiods, scheduler.macromodulations,
                                      scheduler.checkmodulations, scheduler.checks))
    c = svc_br.actions[0]
    self.assertEqual(True, c.internal)
    self.assertTrue(c.is_launchable(now))

    # Two scheduler loop turns: one to launch the check, another to get its result
    self.scheduler_loop(2, [])

    # The check must have been consumed
    self.assertEqual(0, len(svc_br.actions))
def show_logs(self):
    """Show logs. Get logs collected by the unit tests collector handler and print them"""
    logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
    collectors = [h for h in logger_.handlers if isinstance(h, CollectorHandler)]
    assert collectors, "Alignak test Logger is not initialized correctly!"
    # Dump the logs gathered by the first collector handler
    print("--- logs <<<----------------------------------")
    for log in collectors[0].collector:
        print(log)
    print("--- logs >>>----------------------------------")
def show_actions(self):
    """Show the inner actions

    Dump the scheduler actions sorted on their planned launch time, with a
    detailed line for the notifications.
    """
    # The macro resolver needs the scheduler pushed configuration
    macroresolver = MacroResolver()
    macroresolver.init(self._scheduler_daemon.sched.pushed_conf)

    print("--- Scheduler: %s" % self._scheduler.my_daemon.name)
    print("--- actions >>>")
    actions = sorted(list(self._scheduler.actions.values()), key=lambda x: (x.t_to_go, x.creation_time))
    for action in actions:
        print("Time to launch action: %s, creation: %s, now: %s" % (action.t_to_go, action.creation_time, time.time()))
        if action.is_a == 'notification':
            # Resolve the host/service reference for a readable output
            item = self._scheduler.find_item_by_id(action.ref)
            if item.my_type == "host":
                ref = "host: %s" % item.get_name()
            else:
                hst = self._scheduler.find_item_by_id(item.host)
                ref = "svc: %s/%s" % (hst.get_name(), item.get_name())
            print("NOTIFICATION %s (%s - %s) [%s], created: %s for '%s': %s"
                  % (action.type, action.uuid, action.status, ref,
                     time.asctime(time.localtime(action.t_to_go)),
                     action.contact_name, action.command))
        elif action.is_a == 'eventhandler':
            print("EVENTHANDLER:", action)
        else:
            print("ACTION:", action)
    print("<<< actions ---")
def show_checks(self):
    """
    Show checks from the scheduler
    :return:
    """
    print("--- Scheduler: %s" % self._scheduler.my_daemon.name)
    print("--- checks >>>")
    # Dump the checks, oldest created first
    for check in sorted(self._scheduler.checks.values(), key=lambda chk: chk.creation_time):
        print("- %s" % check)
    print("<<< checks ---")
def show_events(self):
    """
    Show the monitoring events raised by the scheduler daemon

    :return: None
    """
    # Note: the former implementation built an unused `my_broker` local from
    # the daemon brokers list; it raised an IndexError when no broker was
    # declared - removed because it was never used.
    monitoring_logs = []
    for event in self._scheduler_daemon.events:
        data = unserialize(event.data)
        monitoring_logs.append((data['level'], data['message']))
    for log in monitoring_logs:
        print(log)
def show_and_clear_actions(self):
    """Dump the current scheduler actions and then empty the actions queue."""
    self.show_actions()
    self.clear_actions()
def count_logs(self):
    """Count the logs collected by the unit tests collector handler and print them

    :return: number of collected log lines
    :rtype: int
    """
    logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
    for handler in logger_.handlers:
        if isinstance(handler, CollectorHandler):
            return len(handler.collector)
    else:
        # for/else: only reached when no CollectorHandler was found
        # (a return above exits before this clause can run)
        assert False, "Alignak test Logger is not initialized correctly!"
def count_actions(self):
    """
    Count the actions in the scheduler's actions.

    @verified
    :return: number of actions currently stored by the scheduler
    :rtype: int
    """
    # len() on the dict itself - no need to materialize the values in a list
    return len(self._scheduler.actions)
def clear_logs(self):
    """
    Remove all the logs stored in the logs collector

    @verified
    :return:
    """
    # Locate the first unit-test collector handler, if any, and reset it
    handler = next((h for h in logging.getLogger(ALIGNAK_LOGGER_NAME).handlers
                    if isinstance(h, CollectorHandler)), None)
    if handler is not None:
        handler.collector = []
    # else:
    #     assert False, "Alignak test Logger is not initialized correctly!"
def clear_actions(self):
    """
    Clear the actions in the scheduler's actions.

    :return: None
    """
    self._scheduler.actions = {}
def clear_checks(self):
    """
    Clear the checks in the scheduler's checks.

    :return: None
    """
    self._scheduler.checks = {}
def clear_events(self, daemon=None):
    """
    Clear the monitoring events of the provided daemon
    (defaults to the scheduler daemon).

    :param daemon: daemon whose events list is reset, or None for the scheduler daemon
    :return: None
    """
    if daemon is None:
        daemon = self._scheduler_daemon

    daemon.events = []
def assert_actions_count(self, number):
    """
    Check the number of actions

    :param number: number of actions we must have
    :type number: int
    :return: None
    """
    # The former code pre-sorted the actions into an unused local variable;
    # the error message below already builds the sorted dump it needs.
    self.assertEqual(number, len(self._scheduler.actions),
                     "Not found expected number of actions:\nactions_logs=[[[\n%s\n]]]" %
                     ('\n'.join('\t%s = creation: %s, is_a: %s, type: %s, status: %s, '
                                'planned: %s, command: %s' %
                                (idx, b.creation_time, b.is_a, b.type,
                                 b.status, b.t_to_go, b.command)
                                for idx, b in enumerate(sorted(self._scheduler.actions.values(),
                                                               key=lambda x: (x.creation_time,
                                                                              x.t_to_go))))))
def assert_actions_match(self, index, pattern, field):
    """
    Check if pattern verified in field(property) name of the action with index in action list

    @verified
    :param index: index in the actions list. If index is -1, all the actions in the list are
    searched for a matching pattern
    :type index: int
    :param pattern: pattern to verify is in the action
    :type pattern: str
    :param field: name of the field (property) of the action
    :type field: str
    :return: None
    """
    regex = re.compile(pattern)
    # Actions are sorted by planned execution time then creation time,
    # so `index` addresses an action in its scheduled order
    actions = sorted(self._scheduler.actions.values(), key=lambda x: (x.t_to_go, x.creation_time))
    if index != -1:
        myaction = actions[index]
        self.assertTrue(regex.search(getattr(myaction, field)),
                        "Not found a matching pattern in actions:\n"
                        "index=%s field=%s pattern=%r\n"
                        "action_line=creation: %s, is_a: %s, type: %s, "
                        "status: %s, planned: %s, command: %s" % (
                            index, field, pattern, myaction.creation_time, myaction.is_a,
                            myaction.type, myaction.status, myaction.t_to_go, myaction.command))
        return

    # index == -1: succeed as soon as any action matches
    for myaction in actions:
        if regex.search(getattr(myaction, field)):
            return

    self.assertTrue(False,
                    "Not found a matching pattern in actions:\nfield=%s pattern=%r\n" %
                    (field, pattern))
def assert_log_count(self, number):
    """
    Check the number of collected logs

    :param number: number of logs we must have
    :type number: int
    :return: None
    """
    logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
    for handler in logger_.handlers:
        if isinstance(handler, CollectorHandler):
            self.assertEqual(number, len(handler.collector),
                             "Not found expected number of logs: %s vs %s"
                             % (number, len(handler.collector)))
            break
    else:
        # No collector handler is installed: the test logger is mis-configured
        assert False, "Alignak test Logger is not initialized correctly!"
def assert_log_match(self, pattern, index=None):
    """
    Search if the log with the index number has the pattern in the Arbiter logs.

    If index is None, then all the collected logs are searched for the pattern

    Logs numbering starts from 0 (the oldest stored log line)

    This function assert on the search result. As of it, if no log is found with th search
    criteria an assertion is raised and the test stops on error.

    :param pattern: string to search in log
    :type pattern: str
    :param index: index number
    :type index: int
    :return: None
    """
    self.assertIsNotNone(pattern, "Searched pattern can not be None!")

    logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
    for handler in logger_.handlers:
        if not isinstance(handler, CollectorHandler):
            continue

        regex = re.compile(pattern)
        log_num = 0

        found = False
        for log in handler.collector:
            if index is None:
                # Search all the collected log lines
                if regex.search(log):
                    found = True
                    break
            elif index == log_num:
                # Only test the log line at the requested index
                if regex.search(log):
                    found = True
                    break
            log_num += 1

        self.assertTrue(found,
                        "Not found a matching log line in logs:\nindex=%s pattern=%r\n"
                        "logs=[[[\n%s\n]]]"
                        % (index, pattern, '\n'.join('\t%s=%s' % (idx, b.strip())
                                                     for idx, b in
                                                     enumerate(handler.collector))))
        break
    else:
        # No collector handler is installed: the test logger is mis-configured
        assert False, "Alignak test Logger is not initialized correctly!"
def assert_checks_count(self, number):
    """
    Check the number of checks

    @verified
    :param number: number of checks we must have
    :type number: int
    :return: None
    """
    checks = sorted(list(self._scheduler.checks.values()), key=lambda x: x.creation_time)
    self.assertEqual(number, len(checks),
                     "Not found expected number of checks:\nchecks_logs=[[[\n%s\n]]]" %
                     ('\n'.join('\t%s = creation: %s, is_a: %s, type: %s, status: %s, planned: %s, '
                                'command: %s' %
                                (idx, b.creation_time, b.is_a, b.type, b.status, b.t_to_go, b.command)
                                for idx, b in enumerate(checks))))
def assert_checks_match(self, index, pattern, field):
    """
    Check if pattern verified in field(property) name of the check with index in check list

    @verified
    :param index: index number of checks list
    :type index: int
    :param pattern: pattern to verify is in the check
    :type pattern: str
    :param field: name of the field (property) of the check
    :type field: str
    :return: None
    """
    regex = re.compile(pattern)
    # Checks are sorted by creation time, so `index` addresses the check
    # in its creation order
    checks = sorted(list(self._scheduler.checks.values()), key=lambda x: x.creation_time)
    mycheck = checks[index]
    self.assertTrue(regex.search(getattr(mycheck, field)),
                    "Not found a matching pattern in checks:\nindex=%s field=%s pattern=%r\n"
                    "check_line=creation: %s, is_a: %s, type: %s, status: %s, planned: %s, "
                    "command: %s" % (
                        index, field, pattern, mycheck.creation_time, mycheck.is_a,
                        mycheck.type, mycheck.status, mycheck.t_to_go, mycheck.command))
def _any_check_match(self, pattern, field, assert_not):
    """
    Search if any check matches the requested pattern

    @verified
    :param pattern: regex pattern searched in the checks
    :param field: name of the check attribute the pattern is searched in
    :param assert_not: when True, assert that no check matches;
    when False, assert that at least one check matches
    :return: None
    """
    regex = re.compile(pattern)
    checks = sorted(list(self._scheduler.checks.values()), key=lambda x: x.creation_time)
    for check in checks:
        if re.search(regex, getattr(check, field)):
            self.assertTrue(not assert_not,
                            "Found check:\nfield=%s pattern=%r\n"
                            "check_line=creation: %s, is_a: %s, type: %s, status: %s, "
                            "planned: %s, command: %s" % (
                                field, pattern, check.creation_time, check.is_a,
                                check.type, check.status, check.t_to_go, check.command)
                            )
            return

    self.assertTrue(assert_not, "No matching check found:\n"
                                "pattern = %r\n" "checks = %r" % (pattern, checks))
def assert_any_check_match(self, pattern, field):
    """Fail unless at least one scheduler check has its `field` attribute
    matching `pattern`.

    :param pattern: regex pattern to search for
    :param field: check attribute name to search in
    :return: None
    """
    self._any_check_match(pattern, field, False)
def assert_no_check_match(self, pattern, field):
    """Fail if any scheduler check has its `field` attribute
    matching `pattern`.

    :param pattern: regex pattern to search for
    :param field: check attribute name to search in
    :return: None
    """
    self._any_check_match(pattern, field, True)
def _any_log_match(self, pattern, assert_not):
    """
    Search if any log in the Arbiter logs matches the requested pattern

    @verified
    :param pattern: regex pattern searched in the collected logs
    :param assert_not: when True, assert that no log matches the pattern;
    when False, assert that at least one log matches
    :return: None
    """
    self.assertIsNotNone(pattern, "Searched pattern can not be None!")

    logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
    for handler in logger_.handlers:
        if not isinstance(handler, CollectorHandler):
            continue

        # print("-----\nParsing collector handler log events...")
        # print("Searching for: %s (%s)" % (pattern, type(pattern)))
        try:
            # re.ASCII is not available on older Python versions
            regex = re.compile(pattern, re.ASCII)
        except AttributeError:
            regex = re.compile(pattern)

        for log in handler.collector:
            if re.search(regex, log):
                # print("# found: %s" % (log))
                self.assertTrue(
                    not assert_not,
                    "Found matching log line, pattern: %r\nlog: %r" % (pattern, log)
                )
                break
        else:
            # # Dump all known log events for analysis
            # for log in handler.collector:
            #     print(". %s (%s)" % (repr(log), type(log)))
            self.assertTrue(assert_not,
                            "No matching log line found, pattern: %r\n" % pattern)
        break
    else:
        # No collector handler is installed: the test logger is mis-configured
        assert False, "Alignak test Logger is not initialized correctly!"
def assert_any_log_match(self, pattern):
    """Fail unless at least one collected log line matches `pattern`.

    :param pattern: regex pattern to search for
    :return: None
    """
    self._any_log_match(pattern, False)
def assert_no_log_match(self, pattern):
    """Fail if any collected log line matches `pattern`.

    :param pattern: regex pattern to search for
    :return: None
    """
    self._any_log_match(pattern, True)
def _any_brok_match(self, pattern, level, assert_not):
    """
    Search if any brok message in the Scheduler broks matches the requested pattern and
    requested level

    @verified
    :param pattern: regex pattern searched in the 'monitoring_log' broks
    :param level: expected brok level, or None to accept any level
    :param assert_not: when True, assert that no brok matches
    :return: None
    """
    regex = re.compile(pattern)

    # Only the first broker linked to the scheduler daemon is considered
    my_broker = [b for b in list(self._scheduler.my_daemon.brokers.values())][0]

    monitoring_logs = []
    print("Broker broks: %s" % my_broker.broks)
    for brok in my_broker.broks:
        if brok.type == 'monitoring_log':
            data = brok.prepare()
            monitoring_logs.append((data['level'], data['message']))
            if re.search(regex, data['message']) and (level is None or data['level'] == level):
                self.assertTrue(not assert_not, "Found matching brok:\n"
                                "pattern = %r\nbrok message = %r" % (pattern, data['message']))
                return

    self.assertTrue(assert_not, "No matching brok found:\n"
                                "pattern = %r\n" "monitring log = %r" % (pattern,
                                                                         monitoring_logs))
def assert_any_brok_match(self, pattern, level=None):
    """Fail unless a 'monitoring_log' brok matches `pattern` (and `level`
    when one is provided).

    :param pattern: regex pattern to search for
    :param level: optional brok level the match must have
    :return: None
    """
    self._any_brok_match(pattern, level, False)
def assert_no_brok_match(self, pattern, level=None):
    """Fail if any 'monitoring_log' brok matches `pattern` (and `level`
    when one is provided).

    :param pattern: regex pattern to search for
    :param level: optional brok level the match must have
    :return: None
    """
    self._any_brok_match(pattern, level, True)
def get_monitoring_events(self, daemon=None, no_date=False):
    """Return the monitoring events of a daemon, oldest first.

    Defaults to the scheduler daemon. When `no_date` is True, the event
    timestamp is dropped from the result so tests that do not care about
    timing can simply compare (level, message) pairs.

    :return: list of (timestamp, level, message) or (level, message) tuples
    """
    if daemon is None:
        daemon = self._scheduler_daemon

    events = []
    for brok in sorted(daemon.events, key=lambda brok: brok.creation_time):
        ts, level, message = brok.get_event()
        print("Event: %s / %s / %s" % (ts, level, message))
        events.append((level, message) if no_date else (ts, level, message))
    return events
def check_monitoring_events_log(self, expected_logs, dump=True, assert_length=True):
    """
    Get the monitoring_log broks and check that they match with the expected_logs provided

    :param expected_logs: expected monitoring logs, as (level, message) pairs
    :param dump: True to print out the monitoring logs
    :param assert_length: True to compare list lengths
    :return: None
    """
    # We got 'monitoring_log' broks for logging to the monitoring events..
    # no_date to avoid comparing the events timestamp !
    monitoring_events = self.get_monitoring_events(no_date=True)

    if dump:
        print("Monitoring events: ")
        for level, message in monitoring_events:
            print("- ('%s', '%s')" % (level, message))

    for log_level, log_message in expected_logs:
        try:
            assert (log_level, log_message) in monitoring_events, "Not found :%s" % log_message
        except UnicodeDecodeError:
            # Python 2 bytes comparison fallback: retry with utf8-decoded strings
            assert (log_level.decode('utf8', 'ignore'), log_message.decode('utf8', 'ignore')) in monitoring_events, "Not found :%s" % log_message

    if not assert_length:
        return

    assert len(expected_logs) == len(monitoring_events), "Length do not match: %d" \
                                                         % len(monitoring_events)
def _any_event_match(self, pattern, level, assert_not):
    """
    Search if any event message in the scheduler daemon events matches the requested
    pattern and requested level

    @verified
    :param pattern: regex pattern searched in the events
    :param level: expected event level, or None to accept any level
    :param assert_not: when True, assert that no event matches
    :return: None
    """
    regex = re.compile(pattern)

    # Debug dump: first broker broks and every known daemon events list
    my_broker = [b for b in list(self._scheduler.my_daemon.brokers.values())][0]
    print("Broker broks: %s" % my_broker.broks)
    for brok in my_broker.broks:
        print("- %s" % brok)

    monitoring_logs = []
    print("Arbiter events: %s" % self._arbiter.events)
    print("Scheduler events: %s" % self._scheduler_daemon.events)
    print("Receiver events: %s" % self._receiver_daemon.events)

    # The search itself is done in the scheduler daemon events only
    for event in self._scheduler_daemon.events:
        data = event.prepare()
        monitoring_logs.append((data['level'], data['message']))
        if re.search(regex, data['message']) and (level is None or data['level'] == level):
            self.assertTrue(not assert_not,
                            "Found matching event:\npattern = %r\nevent message = %r"
                            % (pattern, data['message']))
            return

    self.assertTrue(assert_not,
                    "No matching event found:\npattern = %r\n" "event message = %r"
                    % (pattern, monitoring_logs))
def assert_any_event_match(self, pattern, level=None):
    """Fail unless a scheduler daemon event matches `pattern` (and `level`
    when one is provided).

    :param pattern: regex pattern to search for
    :param level: optional event level the match must have
    :return: None
    """
    self._any_event_match(pattern, level, False)
def assert_no_event_match(self, pattern, level=None):
    """Fail if any scheduler daemon event matches `pattern` (and `level`
    when one is provided).

    :param pattern: regex pattern to search for
    :param level: optional event level the match must have
    :return: None
    """
    self._any_event_match(pattern, level, True)
def get_log_match(self, pattern):
    """Return every collected log line matching `pattern`.

    :param pattern: regex pattern to search for
    :return: list of matching log lines
    """
    self.assertIsNotNone(pattern, "Searched pattern can not be None!")

    logger_ = logging.getLogger(ALIGNAK_LOGGER_NAME)
    for handler in logger_.handlers:
        if isinstance(handler, CollectorHandler):
            regex = re.compile(pattern)
            return [line for line in handler.collector if regex.search(line)]
    else:
        assert False, "Alignak test Logger is not initialized correctly!"
def show_configuration_logs(self):
    """
    Print out the configuration warnings and errors.

    @verified
    :return: None
    """
    sections = (("Configuration warnings:", self.configuration_warnings),
                ("Configuration errors:", self.configuration_errors))
    for title, messages in sections:
        print(title)
        for msg in messages:
            print(" - %s" % msg)
def _any_cfg_log_match(self, pattern, assert_not):
"""
Search a pattern in configuration log (warning and error)
@verified
:param pattern:
:return:
"""
regex = re.compile(pattern)
cfg_logs = self.configuration_warnings + self.configuration_errors
for log in cfg_logs:
if re.search(regex, log):
self.assertTrue(not assert_not,
"Found matching log line:\n"
"pattern = %r\nlog = %r" % (pattern, log))
return
self.assertTrue(assert_not, "No matching log line found:\n"
"pattern = %r\n" "logs = %r" % (pattern, cfg_logs))
def assert_any_cfg_log_match(self, pattern):
    """Fail unless a configuration warning or error matches `pattern`.

    :param pattern: regex pattern to search for
    :return: None
    """
    self._any_cfg_log_match(pattern, False)
def assert_no_cfg_log_match(self, pattern):
    """Fail if any configuration warning or error matches `pattern`.

    :param pattern: regex pattern to search for
    :return: None
    """
    self._any_cfg_log_match(pattern, True)
|
class AlignakTest(unittest2.TestCase):
def assertRegex(self, *args, **kwargs):
pass
def setUp(self):
'''All tests initialization:
- output test identifier
- setup test logger
- track running Alignak daemons
- output system cpu/memory
'''
pass
def tearDown(self):
'''Test ending:
- restore initial log level if it got changed
'''
pass
def set_unit_tests_logger_level(self, log_level=logging.DEBUG):
'''Set the test logger at the provided level -
useful for some tests that check debug log
'''
pass
def _prepare_hosts_configuration(self, cfg_folder, hosts_count=10,
target_file_name=None, realms=None):
'''Prepare the Alignak configuration
:return: the count of errors raised in the log files
'''
pass
def _prepare_configuration(self, copy=True, cfg_folder='/tmp/alignak', daemons_list=None):
pass
def _files_update(self, files, replacements):
'''Update files content with the defined replacements
:param files: list of files to parse and replace
:param replacements: list of values to replace
:return:
'''
pass
def _stop_alignak_daemons(self, arbiter_only=True, request_stop_uri=''):
''' Stop the Alignak daemons started formerly
If request_stop is set, this function will try to stop the daemons with the
/stop_request API, else it will directly send a kill signal.
If some alignak- daemons are still running after the kill, force kill them.
:return: None
'''
pass
def _run_command_with_timeout(self, cmd, timeout_sec):
'''Execute `cmd` in a subprocess and enforce timeout `timeout_sec` seconds.
Return subprocess exit code on natural completion of the subprocess.
Returns None if timeout expires before subprocess completes.'''
pass
def _run_alignak_daemons(self, cfg_folder='/tmp/alignak', runtime=30,
daemons_list=None, spare_daemons=[], piped=False, run_folder='',
arbiter_only=True, update_configuration=True, verbose=False):
''' Run the Alignak daemons for a passive configuration
Let the daemons run for the number of seconds defined in the runtime parameter and
then kill the required daemons (list in the spare_daemons parameter)
Check that the run daemons did not raised any ERROR log
:return: None
'''
pass
def _check_daemons_log_for_errors(self, daemons_list, run_folder='/tmp/alignak',
ignored_warnings=None, ignored_errors=None, dump_all=True):
'''
Check that the daemons all started correctly and that they got their configuration
ignored_warnings and ignored_errors are lists of strings that make a WARNING or ERROR log
not to be considered as a warning or error
:return:
'''
pass
def setup_with_file(self, configuration_file=None, env_file=None,
verbose=False, unit_test=True, dispatching=False):
'''
Load alignak with the provided configuration and environment files
If verbose is True the environment loading is printed out on the console.
If the configuration loading fails, a SystemExit exception is raised to the caller.
The conf_is_correct property indicates if the configuration loading succeeded or failed.
The configuration errors property contains a list of the error message that are normally
logged as ERROR by the arbiter.
If unit_test is True it will simulate the dispatcher configuration sending
to the declared satellites in the configuration. Set to False if you intend to run
real daemons that will receive their configuration!
:param unit_test: set to False for integration tests
:type unit_test: bool
:param configuration_file: path + file name of the main configuration file
:type configuration_file: str
:param env_file: path + file name of the alignak environment file
:type env_file: str
:param verbose: load Alignak environment in verbose mode (defaults True)
:type verbose: bool
:param dispatching: simulate the dispatch of the parsed configuration
:type dispatching: bool
:return: None
'''
pass
def fake_check(self, ref, exit_status, output="OK"):
'''
Simulate a check execution and result
:param ref: host/service concerned by the check
:param exit_status: check exit status code (0, 1, ...).
If set to None, the check is simply scheduled but not "executed"
:param output: check output (output + perf data)
:return:
'''
pass
def scheduler_loop(self, count, items=None, scheduler=None):
'''
Manage scheduler actions
:param count: number of loop turns to run
:type count: int
:param items: list of list [[object, exist_status, output]]
:type items: list
:param scheduler: The scheduler
:type scheduler: None | alignak.daemons.SchedulerDaemon
:return: None
'''
pass
def manage_freshness_check(self, count=1, mysched=None):
'''Run the scheduler loop for freshness_check
:param count: number of scheduler loop turns
:type count: int
:param mysched: a specific scheduler to get used
:type mysched: None | object
:return: n/a
'''
pass
def manage_external_command(self, external_command, run=True):
'''Manage an external command.
:return: result of external command resolution
'''
pass
def external_command_loop(self, count=1):
'''Execute the scheduler actions for external commands.
The scheduler is not an ECM 'dispatcher' but an 'applyer' ... so this function is on
the external command execution side of the problem.
:return:
'''
pass
def worker_loop(self, verbose=True):
pass
def launch_internal_check(self, svc_br):
''' Launch an internal check for the business rule service provided '''
pass
def show_logs(self):
'''Show logs. Get logs collected by the unit tests collector handler and print them'''
pass
def show_actions(self):
'''"Show the inner actions'''
pass
def show_checks(self):
'''
Show checks from the scheduler
:return:
'''
pass
def show_events(self):
'''
Show the events
:return:
'''
pass
def show_and_clear_actions(self):
pass
def count_logs(self):
'''Count the logs collected by the unit tests collector handler and print them'''
pass
def count_actions(self):
'''
Count the actions in the scheduler's actions.
@verified
:return:
'''
pass
def clear_logs(self):
'''
Remove all the logs stored in the logs collector
@verified
:return:
'''
pass
def clear_actions(self):
'''
Clear the actions in the scheduler's actions.
:return:
'''
pass
def clear_checks(self):
'''
Clear the checks in the scheduler's checks.
:return:
'''
pass
def clear_events(self, daemon=None):
'''
Clear the checks in the scheduler's checks.
:return:
'''
pass
def assert_actions_count(self, number):
'''
Check the number of actions
:param number: number of actions we must have
:type number: int
:return: None
'''
pass
def assert_actions_match(self, index, pattern, field):
'''
Check if pattern verified in field(property) name of the action with index in action list
@verified
:param index: index in the actions list. If index is -1, all the actions in the list are
searched for a matching pattern
:type index: int
:param pattern: pattern to verify is in the action
:type pattern: str
:param field: name of the field (property) of the action
:type field: str
:return: None
'''
pass
def assert_log_count(self, number):
'''
Check the number of log
:param number: number of logs we must have
:type number: int
:return: None
'''
pass
def assert_log_match(self, pattern, index=None):
'''
Search if the log with the index number has the pattern in the Arbiter logs.
If index is None, then all the collected logs are searched for the pattern
Logs numbering starts from 0 (the oldest stored log line)
This function assert on the search result. As of it, if no log is found with th search
criteria an assertion is raised and the test stops on error.
:param pattern: string to search in log
:type pattern: str
:param index: index number
:type index: int
:return: None
'''
pass
def assert_checks_count(self, number):
'''
Check the number of actions
@verified
:param number: number of actions we must have
:type number: int
:return: None
'''
pass
def assert_checks_match(self, index, pattern, field):
'''
Check if pattern verified in field(property) name of the check with index in check list
@verified
:param index: index number of checks list
:type index: int
:param pattern: pattern to verify is in the check
:type pattern: str
:param field: name of the field (property) of the check
:type field: str
:return: None
'''
pass
def _any_check_match(self, pattern, field, assert_not):
'''
Search if any check matches the requested pattern
@verified
:param pattern:
:param field to search with pattern:
:param assert_not:
:return:
'''
pass
def assert_any_check_match(self, pattern, field):
'''
Assert if any check matches the pattern
@verified
:param pattern:
:param field to search with pattern:
:return:
'''
pass
def assert_no_check_match(self, pattern, field):
'''
Assert if no check matches the pattern
@verified
:param pattern:
:param field to search with pattern:
:return:
'''
pass
def _any_log_match(self, pattern, assert_not):
'''
Search if any log in the Arbiter logs matches the requested pattern
If 'scheduler' is True, then uses the scheduler's broks list.
@verified
:param pattern:
:param assert_not:
:return:
'''
pass
def assert_any_log_match(self, pattern):
'''Assert if any of the collected log matches the pattern
:param pattern:
:return:
'''
pass
def assert_no_log_match(self, pattern):
'''Assert if no collected log matches the pattern
:param pattern:
:return:
'''
pass
def _any_brok_match(self, pattern, level, assert_not):
'''
Search if any brok message in the Scheduler broks matches the requested pattern and
requested level
@verified
:param pattern:
:param assert_not:
:return:
'''
pass
def assert_any_brok_match(self, pattern, level=None):
'''
Search if any brok message in the Scheduler broks matches the requested pattern and
requested level
@verified
:param pattern:
:param scheduler:
:return:
'''
pass
def assert_no_brok_match(self, pattern, level=None):
'''
Search if no brok message in the Scheduler broks matches the requested pattern and
requested level
@verified
:param pattern:
:param scheduler:
:return:
'''
pass
def get_monitoring_events(self, daemon=None, no_date=False):
''' This function gets the monitoring events from the provided daemon
If no daemon is specified, it will get from the default Scheduler
the event Broks are sorted by ascending creation timestamp
If no_date is specified, then the events list will be filtered and the vents data will
not be returned. This makes it really easier for the unit tests that do not need to care
about the events timestamp to check if an event is raised or not!
:return:
'''
pass
def check_monitoring_events_log(self, expected_logs, dump=True, assert_length=True):
'''
Get the monitoring_log broks and check that they match with the expected_logs provided
:param expected_logs: expected monitoring logs
:param dump: True to print out the monitoring logs
:param assert_length: True to compare list lengths
:return:
'''
pass
def _any_event_match(self, pattern, level, assert_not):
'''
Search if any event message in the Arbiter events matches the requested pattern and
requested level
@verified
:param pattern:
:param assert_not:
:return:
'''
pass
def assert_any_event_match(self, pattern, level=None):
'''
Search if any event message in the Scheduler events matches the requested pattern and
requested level
@verified
:param pattern:
:param scheduler:
:return:
'''
pass
def assert_no_event_match(self, pattern, level=None):
'''
Search if no event message in the Scheduler events matches the requested pattern and
requested level
@verified
:param pattern:
:param scheduler:
:return:
'''
pass
def get_log_match(self, pattern):
'''Get the collected logs matching the provided pattern'''
pass
def show_configuration_logs(self):
'''
Prints the configuration logs
@verified
:return:
'''
pass
def _any_cfg_log_match(self, pattern, assert_not):
'''
Search a pattern in configuration log (warning and error)
@verified
:param pattern:
:return:
'''
pass
def assert_any_cfg_log_match(self, pattern):
'''
Assert if any configuration log matches the pattern
@verified
:param pattern:
:return:
'''
pass
def assert_no_cfg_log_match(self, pattern):
'''
Assert if no configuration log matches the pattern
@verified
:param pattern:
:return:
'''
pass
| 56 | 51 | 30 | 4 | 19 | 8 | 5 | 0.43 | 1 | 27 | 10 | 5 | 55 | 30 | 55 | 55 | 1,735 | 250 | 1,037 | 264 | 976 | 450 | 897 | 248 | 841 | 34 | 1 | 6 | 267 |
4,008 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_statsd.py
|
tests.test_statsd.TestStatsFile
|
class TestStatsFile(AlignakTest):
    """
    This class test the Alignak stats in a file
    """
    def setUp(self):
        """Configure the environment so statistics are written to a file."""
        super(TestStatsFile, self).setUp()

        # Log at DEBUG level
        self.set_unit_tests_logger_level()
        self.clear_logs()

        # Declare environment to send stats to a file
        os.environ['ALIGNAK_STATS_FILE'] = '/tmp/stats.alignak'
        # Those are the same as the default values:
        os.environ['ALIGNAK_STATS_FILE_LINE_FMT'] = '[#date#] #counter# #value# #uom#\n'
        os.environ['ALIGNAK_STATS_FILE_DATE_FMT'] = '%Y-%m-%d %H:%M:%S'

        # Create our stats manager...
        self.statsmgr = Stats()
        assert self.statsmgr.stats_file == '/tmp/stats.alignak'
        assert self.statsmgr.line_fmt == '[#date#] #counter# #value# #uom#\n'
        assert self.statsmgr.date_fmt == '%Y-%m-%d %H:%M:%S'

        # Number of lines the current test expects in the stats file
        self.line_count = 0
        # Start each test from a clean stats file
        if os.path.exists('/tmp/stats.alignak'):
            os.remove('/tmp/stats.alignak')

    def tearDown(self):
        """Check that the stats file contains the expected number of lines."""
        self.statsmgr.file_d.close()

        print("-----\n%s stats file\n-----\n" % '/tmp/stats.alignak')
        try:
            # Use a context manager so the file handle is always released
            with open('/tmp/stats.alignak', 'r') as hfile:
                lines = hfile.readlines()
            print(lines)
            assert self.line_count == len(lines)
        except OSError as exp:
            print("Error: %s" % exp)
            assert False

    def test_statsmgr_timer_file(self):
        """ Test sending data for a timer

        :return:
        """
        # Register stats manager as enabled but no report to StatsD
        self.statsmgr.register('arbiter-master', 'arbiter',
                               statsd_enabled=True, statsd_host=None)
        index = 0
        self.assert_log_match(re.escape(
            'StatsD configuration for arbiter-master - None:8125, prefix: alignak, '
            'enabled: True, broks: False, file: /tmp/stats.alignak'
        ), index)
        index += 1
        self.assert_log_match(re.escape(
            'Alignak internal statistics are written in the file /tmp/stats.alignak'
        ), index)

        assert self.statsmgr.stats == {}

        # Create a metric statistic
        self.statsmgr.timer('test', 0)
        assert len(self.statsmgr.stats) == 1
        # Get min, max, count and sum
        assert self.statsmgr.stats['test'] == (0, 0, 1, 0)
        assert self.statsmgr.file_d is not None
        assert os.path.exists(self.statsmgr.stats_file)
        self.line_count += 1

        # Increment
        self.statsmgr.timer('test', 1)
        assert len(self.statsmgr.stats) == 1
        # Get min, max, count (incremented) and sum
        assert self.statsmgr.stats['test'] == (0, 1, 2, 1)
        self.line_count += 1

    def test_statsmgr_counter_file(self):
        """ Test sending data for a counter

        :return:
        """
        # Register stats manager as enabled but no report to StatsD
        self.statsmgr.register('arbiter-master', 'arbiter',
                               statsd_enabled=True, statsd_host=None)
        index = 0
        self.assert_log_match(re.escape(
            'StatsD configuration for arbiter-master - None:8125, prefix: alignak, '
            'enabled: True, broks: False, file: /tmp/stats.alignak'
        ), index)
        index += 1
        self.assert_log_match(re.escape(
            'Alignak internal statistics are written in the file /tmp/stats.alignak'
        ), index)

        assert self.statsmgr.stats == {}

        # Create a metric statistic
        self.statsmgr.counter('test', 0)
        assert len(self.statsmgr.stats) == 1
        # Get min, max, count and sum
        assert self.statsmgr.stats['test'] == (0, 0, 1, 0)
        self.line_count += 1

    def test_statsmgr_gauge_file(self):
        """ Test sending data for a gauge

        :return:
        """
        # Register stats manager as enabled
        self.statsmgr.register('arbiter-master', 'arbiter',
                               statsd_host='localhost', statsd_port=8125,
                               statsd_prefix='alignak', statsd_enabled=True,
                               broks_enabled=True)
        index = 0
        self.assert_log_match(re.escape(
            'StatsD configuration for arbiter-master - localhost:8125, prefix: alignak, '
            'enabled: True, broks: True, file: /tmp/stats.alignak'
        ), index)
        index += 1
        self.assert_log_match(re.escape(
            'Sending arbiter-master statistics to: localhost:8125, prefix: alignak'
        ), index)
        index += 1
        self.assert_log_match(re.escape(
            'Trying to contact StatsD server...'
        ), index)
        index += 1
        self.assert_log_match(re.escape(
            'StatsD server contacted'
        ), index)
        index += 1
        self.assert_log_match(re.escape(
            'Alignak internal statistics are sent to StatsD.'
        ), index)
        index += 1
        self.assert_log_match(re.escape(
            'Alignak internal statistics are written in the file /tmp/stats.alignak'
        ), index)
        index += 1

        assert self.statsmgr.stats == {}

        # Create a metric statistic
        self.statsmgr.gauge('test', 0)
        assert len(self.statsmgr.stats) == 1
        # Get min, max, count and sum
        assert self.statsmgr.stats['test'] == (0, 0, 1, 0)
        self.line_count += 1
|
class TestStatsFile(AlignakTest):
'''
This class test the Alignak stats in a file
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_statsmgr_timer_file(self):
''' Test sending data for a timer
:return:
'''
pass
def test_statsmgr_counter_file(self):
''' Test sending data for a counter
:return:
'''
pass
def test_statsmgr_gauge_file(self):
''' Test sending data for a gauge
:return:
'''
pass
| 6 | 4 | 28 | 3 | 20 | 5 | 1 | 0.28 | 1 | 3 | 1 | 0 | 5 | 2 | 5 | 60 | 148 | 18 | 103 | 14 | 97 | 29 | 75 | 13 | 69 | 2 | 2 | 1 | 7 |
4,009 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_satellite_link.py
|
tests.test_satellite_link.template_DaemonLink_get_name
|
class template_DaemonLink_get_name():
    """Test template mixin checking get_name() on a daemon link class.

    Subclasses must define `daemon_link` as the link class under test.
    """
    def get_link(self):
        """Build a daemon link instance from an empty configuration."""
        cls = self.daemon_link
        return cls({})

    def test_get_name(self):
        """An unconfigured link has no conf and is named 'Unnamed <type>'."""
        link = self.get_link()
        print("Link: %s / %s" % (type(link), link))
        link.fill_default()
        print("Name: %s / %s / %s" % (link.type, link.name, link.get_name()))
        print("Config: %s" % (link.give_satellite_cfg()))
        print("Config: %s" % (link.have_conf))
        # Identity check: have_conf must be the boolean False, not a falsy value
        assert link.have_conf is False

        try:
            self.assertEqual("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
        except AttributeError:
            self.assertTrue(False, "get_name should not raise AttributeError")
|
class template_DaemonLink_get_name():
    # NOTE(review): auto-generated stub skeleton of the test template class;
    # bodies replaced by ``pass``.
    def get_link(self):
        pass
    def test_get_name(self):
        pass
| 3 | 0 | 8 | 1 | 8 | 0 | 2 | 0 | 0 | 2 | 0 | 6 | 2 | 0 | 2 | 2 | 18 | 2 | 16 | 5 | 13 | 0 | 16 | 5 | 13 | 2 | 0 | 1 | 3 |
4,010 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/servicedependency.py
|
alignak.objects.servicedependency.Servicedependencies
|
class Servicedependencies(Items):
    """Servicedependencies manage a list of Servicedependency objects,
    used for parsing configuration
    """
    inner_class = Servicedependency

    def delete_svc_dep_by_id(self, ids):
        """Delete a list of servicedependency

        :param ids: ids list to delete
        :type ids: list
        :return: None
        """
        for s_id in ids:
            del self[s_id]

    def add_service_dependency(self, dep_host_name, dep_service_description,
                               par_host_name, par_service_description):
        """Instantiate and add a Servicedependency object to the items dict::

        * notification criteria is "u,c,w"
        * inherits_parent is True

        :param dep_host_name: dependent host name
        :type dep_host_name: str
        :param dep_service_description: dependent service description
        :type dep_service_description: str
        :param par_host_name: host name
        :type par_host_name: str
        :param par_service_description: service description
        :type par_service_description: str
        :return: None
        """
        # We create a "standard" service_dep
        params = {
            'host_name': par_host_name,
            'service_description': par_service_description,
            'dependent_host_name': dep_host_name,
            'dependent_service_description': dep_service_description,
            'notification_failure_criteria': 'u,c,w',
            'inherits_parent': '1'
        }
        self.add_item(Servicedependency(params))

    def explode_hostgroup(self, svc_dep, hostgroups):
        # pylint: disable=too-many-locals
        """Explode a service dependency for each member of hostgroup

        :param svc_dep: service dependency to explode
        :type svc_dep: alignak.objects.servicedependency.Servicedependency
        :param hostgroups: used to find hostgroup objects
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :return:None
        """
        # We will create a service dependency for each host part of the host group
        # First get services
        snames = [d.strip() for d in svc_dep.service_description.split(',')]
        # And dep services
        dep_snames = [d.strip() for d in svc_dep.dependent_service_description.split(',')]
        # Now for each host into hostgroup we will create a service dependency object
        hg_names = [n.strip() for n in svc_dep.hostgroup_name.split(',')]
        for hg_name in hg_names:
            hostgroup = hostgroups.find_by_name(hg_name)
            if hostgroup is None:
                err = "ERROR: the servicedependecy got an unknown hostgroup_name '%s'" % hg_name
                self.add_error(err)
                continue
            hnames = []
            hnames.extend([m.strip() for m in hostgroup.get_hosts()])
            # One new dependency per (host, dependent service, father service) triple
            for hname in hnames:
                for dep_sname in dep_snames:
                    for sname in snames:
                        new_sd = svc_dep.copy()
                        new_sd.host_name = hname
                        new_sd.service_description = sname
                        new_sd.dependent_host_name = hname
                        new_sd.dependent_service_description = dep_sname
                        self.add_item(new_sd)

    def explode(self, hostgroups):
        # pylint: disable=too-many-locals, too-many-branches
        """Explode all service dependency for each member of hostgroups

        Each member of dependent hostgroup or hostgroup in dependency have to get a copy of
        service dependencies (quite complex to parse)

        :param hostgroups: used to look for hostgroup
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :return: None
        """
        # The "old" services will be removed. All services with
        # more than one host or a host group will be in it
        to_be_removed = []

        # Then for every host create a copy of the service with just the host
        # because we are adding services, we can't just loop in it
        for svc_dep_id in list(self.items.keys()):
            svc_dep = self.items[svc_dep_id]

            # First case: we only have to propagate the services dependencies to
            # all the hosts of some hostgroups
            # Either a specific property is defined (Shinken/Alignak) or
            # no dependent hosts groups is defined
            if getattr(svc_dep, 'explode_hostgroup', '0') == '1' or \
                    (hasattr(svc_dep, 'hostgroup_name') and
                     not hasattr(svc_dep, 'dependent_hostgroup_name')):
                self.explode_hostgroup(svc_dep, hostgroups)
                to_be_removed.append(svc_dep_id)
                continue

            # Get the list of all FATHER hosts and service dependencies
            father_hosts = []
            if getattr(svc_dep, 'host_name', ''):
                father_hosts.extend([h.strip() for h in svc_dep.host_name.split(',')])

            if getattr(svc_dep, 'hostgroup_name', ''):
                hg_names = [g.strip() for g in svc_dep.hostgroup_name.split(',')]
                for hg_name in hg_names:
                    hostgroup = hostgroups.find_by_name(hg_name)
                    if hostgroup is None:
                        # Bug fix: hostgroup is None here, so the error must be
                        # reported on self - calling hostgroup.add_error raised
                        # an AttributeError and masked the configuration error
                        self.add_error("A servicedependecy got an unknown "
                                       "hostgroup_name '%s'" % hg_name)
                        continue
                    father_hosts.extend([m.strip() for m in hostgroup.get_hosts()])

            services = []
            if getattr(svc_dep, 'service_description', ''):
                services = [s.strip() for s in svc_dep.service_description.split(',')]

            couples = []
            for host_name in father_hosts:
                for service_description in services:
                    couples.append((host_name, service_description))

            if not hasattr(svc_dep, 'dependent_hostgroup_name') \
                    and hasattr(svc_dep, 'hostgroup_name'):
                svc_dep.dependent_hostgroup_name = svc_dep.hostgroup_name

            # Now the dependent part (the sons)
            son_hosts = []
            if getattr(svc_dep, 'dependent_hostgroup_name', ''):
                hg_names = [g.strip() for g in svc_dep.dependent_hostgroup_name.split(',')]
                for hg_name in hg_names:
                    hostgroup = hostgroups.find_by_name(hg_name)
                    if hostgroup is None:
                        # Bug fix: same as above - report on self, not on None
                        self.add_error("A servicedependecy got an unknown "
                                       "dependent_hostgroup_name '%s'" % hg_name)
                        continue
                    son_hosts.extend([m.strip() for m in hostgroup.get_hosts()])

            if not hasattr(svc_dep, 'dependent_host_name'):
                svc_dep.dependent_host_name = getattr(svc_dep, 'host_name', '')
            if getattr(svc_dep, 'dependent_host_name', ''):
                son_hosts.extend([h.strip() for h in svc_dep.dependent_host_name.split(',')])

            dep_snames = [s.strip() for s in svc_dep.dependent_service_description.split(',')]
            dep_couples = []
            for dep_hname in son_hosts:
                for dep_sname in dep_snames:
                    dep_couples.append((dep_hname.strip(), dep_sname.strip()))

            # Create the new service dependencies from all this stuff
            for (dep_hname, dep_sname) in dep_couples:  # the sons, like HTTP
                for (host_name, service_description) in couples:  # the fathers, like MySQL
                    new_sd = svc_dep.copy()
                    new_sd.host_name = host_name
                    new_sd.service_description = service_description
                    new_sd.dependent_host_name = dep_hname
                    new_sd.dependent_service_description = dep_sname
                    self.add_item(new_sd)

                # Ok so we can remove the old one
                to_be_removed.append(svc_dep_id)

        self.delete_svc_dep_by_id(to_be_removed)

    def linkify(self, hosts, services, timeperiods):
        """Create link between objects::

        * servicedependency -> host
        * servicedependency -> service
        * servicedependency -> timeperiods

        :param hosts: hosts to link
        :type hosts: alignak.objects.host.Hosts
        :param services: services to link
        :type services: alignak.objects.service.Services
        :param timeperiods: timeperiods to link
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        """
        self.linkify_svc_dep_by_service(hosts, services)
        self.linkify_svc_dep_by_timeperiod(timeperiods)
        self.linkify_service_by_svc_dep(services)

    def linkify_svc_dep_by_service(self, hosts, services):
        """Replace dependent_service_description and service_description
        in service dependency by the real object

        :param hosts: host list, used to look for a specific one
        :type hosts: alignak.objects.host.Hosts
        :param services: service list to look for a specific one
        :type services: alignak.objects.service.Services
        :return: None
        """
        to_del = []
        for svc_dep in self:
            try:
                # Resolve the dependent (son) side first...
                s_name = svc_dep.dependent_service_description
                hst_name = svc_dep.dependent_host_name

                # The new member list, in id
                service = services.find_srv_by_name_and_hostname(hst_name, s_name)
                if service is None:
                    host = hosts.find_by_name(hst_name)
                    if not (host and host.is_excluded_for_sdesc(s_name)):
                        self.add_error("Service %s not found for host %s" % (s_name, hst_name))
                    elif host:
                        self.add_warning("Service %s is excluded from host %s ; "
                                         "removing this service dependency as it's unusable."
                                         % (s_name, hst_name))
                    to_del.append(svc_dep)
                    continue
                svc_dep.dependent_service_description = service.uuid

                # ... then the father side
                s_name = svc_dep.service_description
                hst_name = svc_dep.host_name

                # The new member list, in id
                service = services.find_srv_by_name_and_hostname(hst_name, s_name)
                if service is None:
                    host = hosts.find_by_name(hst_name)
                    if not (host and host.is_excluded_for_sdesc(s_name)):
                        self.add_error("Service %s not found for host %s" % (s_name, hst_name))
                    elif host:
                        self.add_warning("Service %s is excluded from host %s ; "
                                         "removing this service dependency as it's unusable."
                                         % (s_name, hst_name))
                    to_del.append(svc_dep)
                    continue
                svc_dep.service_description = service.uuid

            except AttributeError as err:
                logger.error("[servicedependency] fail to linkify by service %s: %s",
                             svc_dep, err)
                to_del.append(svc_dep)

        for svc_dep in to_del:
            self.remove_item(svc_dep)

    def linkify_svc_dep_by_timeperiod(self, timeperiods):
        """Replace dependency_period by a real object in service dependency

        :param timeperiods: list of timeperiod, used to look for a specific one
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        """
        for svc_dep in self:
            try:
                # Bug fix: resolve the *configured* period name before resetting it.
                # The former code cleared dependency_period and then searched for '',
                # so a configured dependency period was never linked to its uuid.
                timeperiod = timeperiods.find_by_name(
                    getattr(svc_dep, 'dependency_period', ''))
                if timeperiod:
                    svc_dep.dependency_period = timeperiod.uuid
                else:
                    svc_dep.dependency_period = ''
            except AttributeError as exp:
                logger.error("[servicedependency] fail to linkify by timeperiods: %s", exp)

    def linkify_service_by_svc_dep(self, services):
        """Add dependency in service objects

        :return: None
        """
        for svc_dep in self:
            # Only used for debugging purpose when loops are detected
            setattr(svc_dep, "service_description_string", "undefined")
            setattr(svc_dep, "dependent_service_description_string", "undefined")

            if getattr(svc_dep, 'service_description', None) is None:
                continue
            if getattr(svc_dep, 'dependent_service_description', None) is None:
                continue

            services.add_act_dependency(svc_dep.dependent_service_description,
                                        svc_dep.service_description,
                                        svc_dep.notification_failure_criteria,
                                        getattr(svc_dep, 'dependency_period', '24x7'),
                                        svc_dep.inherits_parent)

            services.add_chk_dependency(svc_dep.dependent_service_description,
                                        svc_dep.service_description,
                                        svc_dep.execution_failure_criteria,
                                        getattr(svc_dep, 'dependency_period', '24x7'),
                                        svc_dep.inherits_parent)

            # Only used for debugging purpose when loops are detected
            setattr(svc_dep, "service_description_string",
                    services[svc_dep.service_description].get_name())
            setattr(svc_dep, "dependent_service_description_string",
                    services[svc_dep.dependent_service_description].get_name())

    def is_correct(self):
        """Check if this servicedependency configuration is correct ::

        * Check our own specific properties
        * Call our parent class is_correct checker

        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        """
        state = True

        # Internal checks before executing inherited function...
        loop = self.no_loop_in_parents("service_description", "dependent_service_description")
        if loop:
            self.add_error("Loop detected while checking service dependencies:")
            state = False
            for item in self:
                for elem in loop:
                    if elem == item.service_description:
                        self.add_error("- service %s is a parent service_description "
                                       "in dependency defined in %s"
                                       % (item.service_description_string,
                                          item.imported_from))
                    elif elem == item.dependent_service_description:
                        self.add_error("- service %s is a child service_description "
                                       "in dependency defined in %s"
                                       % (item.dependent_service_description_string,
                                          item.imported_from))

        return super(Servicedependencies, self).is_correct() and state
|
class Servicedependencies(Items):
    '''Servicedependencies manage a list of Servicedependency objects,
    used for parsing configuration
    '''
    # NOTE(review): auto-generated stub skeleton - bodies replaced by ``pass``.
    def delete_svc_dep_by_id(self, ids):
        '''Delete a list of servicedependency

        :param ids: ids list to delete
        :type ids: list
        :return: None
        '''
        pass

    def add_service_dependency(self, dep_host_name, dep_service_description,
                               par_host_name, par_service_description):
        '''Instantiate and add a Servicedependency object to the items dict::

        * notification criteria is "u,c,w"
        * inherits_parent is True

        :param dep_host_name: dependent host name
        :type dep_host_name: str
        :param dep_service_description: dependent service description
        :type dep_service_description: str
        :param par_host_name: host name
        :type par_host_name: str
        :param par_service_description: service description
        :type par_service_description: str
        :return: None
        '''
        pass

    def explode_hostgroup(self, svc_dep, hostgroups):
        '''Explode a service dependency for each member of hostgroup

        :param svc_dep: service dependency to explode
        :type svc_dep: alignak.objects.servicedependency.Servicedependency
        :param hostgroups: used to find hostgroup objects
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :return:None
        '''
        pass

    # NOTE(review): duplicate method name - this second ``explode_hostgroup``
    # shadows the previous definition and carries the docstring of ``explode``;
    # it was presumably meant to be named ``explode`` - confirm with the
    # skeleton generator.
    def explode_hostgroup(self, svc_dep, hostgroups):
        '''Explode all service dependency for each member of hostgroups

        Each member of dependent hostgroup or hostgroup in dependency have to get a copy of
        service dependencies (quite complex to parse)

        :param hostgroups: used to look for hostgroup
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :return: None
        '''
        pass

    def linkify(self, hosts, services, timeperiods):
        '''Create link between objects::

        * servicedependency -> host
        * servicedependency -> service
        * servicedependency -> timeperiods

        :param hosts: hosts to link
        :type hosts: alignak.objects.host.Hosts
        :param services: services to link
        :type services: alignak.objects.service.Services
        :param timeperiods: timeperiods to link
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        '''
        pass

    def linkify_svc_dep_by_service(self, hosts, services):
        '''Replace dependent_service_description and service_description
        in service dependency by the real object

        :param hosts: host list, used to look for a specific one
        :type hosts: alignak.objects.host.Hosts
        :param services: service list to look for a specific one
        :type services: alignak.objects.service.Services
        :return: None
        '''
        pass

    def linkify_svc_dep_by_timeperiod(self, timeperiods):
        '''Replace dependency_period by a real object in service dependency

        :param timeperiods: list of timeperiod, used to look for a specific one
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        '''
        pass

    def linkify_service_by_svc_dep(self, services):
        '''Add dependency in service objects

        :return: None
        '''
        pass

    def is_correct(self):
        '''Check if this servicedependency configuration is correct ::

        * Check our own specific properties
        * Call our parent class is_correct checker

        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        '''
        pass
| 10 | 10 | 35 | 4 | 21 | 10 | 6 | 0.5 | 1 | 4 | 1 | 0 | 9 | 0 | 9 | 54 | 332 | 49 | 190 | 57 | 179 | 95 | 153 | 54 | 143 | 20 | 2 | 4 | 54 |
4,011 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/serviceescalation.py
|
alignak.objects.serviceescalation.Serviceescalation
|
class Serviceescalation(Item):
    """Serviceescalation class is used to implement notification escalation for services

    TODO: Why this class does not inherit from alignak.objects.Escalation.
    Maybe we can merge it
    """
    my_type = 'serviceescalation'

    properties = Item.properties.copy()
    properties.update({
        'host_name':
            StringProp(),
        'hostgroup_name':
            StringProp(),
        'service_description':
            StringProp(),
        'first_notification':
            IntegerProp(),
        'last_notification':
            IntegerProp(),
        'notification_interval':
            IntegerProp(default=30),  # like Nagios value
        'escalation_period':
            StringProp(default=''),
        'escalation_options':
            ListProp(default=['w', 'x', 'c', 'r'], split_on_comma=True),
        'contacts':
            ListProp(default=[], merging='join', split_on_comma=True),
        'contact_groups':
            ListProp(default=[], merging='join', split_on_comma=True),
        'first_notification_time':
            IntegerProp(),
        'last_notification_time':
            IntegerProp(),
    })

    def __init__(self, params, parsing=True):
        # Normalize escalation options: the legacy 'u' (unreachable) flag
        # is stored internally as 'x'
        if 'escalation_options' in params:
            params['escalation_options'] = \
                [option.replace('u', 'x') for option in params['escalation_options']]
        super(Serviceescalation, self).__init__(params, parsing=parsing)
        self.fill_default()
|
class Serviceescalation(Item):
    '''Serviceescalation class is used to implement notification escalation for services
    TODO: Why this class does not inherit from alignak.objects.Escalation.
    Maybe we can merge it
    '''
    # NOTE(review): auto-generated stub skeleton - body replaced by ``pass``.
    def __init__(self, params, parsing=True):
        pass
| 2 | 1 | 7 | 0 | 6 | 1 | 3 | 0.17 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 35 | 43 | 3 | 35 | 5 | 33 | 6 | 10 | 5 | 8 | 3 | 3 | 2 | 3 |
4,012 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/http/daemon.py
|
alignak.http.daemon.PortNotFree
|
class PortNotFree(Exception):
    """Exception raised when port is already used by another application"""
|
class PortNotFree(Exception):
    '''Exception raised when port is already used by another application'''
    # Stub skeleton: intentionally empty exception subclass.
    pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 0 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
4,013 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestCommand
|
class TestCommand(PropertiesTester, AlignakTest):
    """Property checks for the Command item class."""

    # No property of Command is declared unused
    unused_props = []

    # Properties that must be declared without a default value
    without_default = ['command_name', 'command_line']

    # Expected default value for every remaining property
    properties = {
        'imported_from': 'alignak-self',
        'use': [],
        'register': True,
        'definition_order': 100,
        'name': '',
        'poller_tag': 'None',
        'reactionner_tag': 'None',
        'module_type': 'fork',
        'timeout': -1,
        'enable_environment_macros': False,
    }

    def setUp(self):
        super(TestCommand, self).setUp()

        from alignak.objects.command import Command
        self.item = None
        self.item = Command({}, parsing=True)
        print(self.item.properties)
|
class TestCommand(PropertiesTester, AlignakTest):
    # NOTE(review): auto-generated stub skeleton - setUp body replaced by ``pass``.
    def setUp(self):
        pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 25 | 4 | 21 | 7 | 18 | 0 | 10 | 7 | 7 | 1 | 2 | 0 | 1 |
4,014 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.PropertiesTester
|
class PropertiesTester(object):
    """Mixin verifying that an item class declares exactly the expected
    properties; subclasses set ``item``, ``unused_props``, ``without_default``
    and ``properties``."""

    def test_unused_properties(self):
        """Each name in unused_props must be declared as an UnusedProp."""
        item = self.item
        print("Testing unused properties:")
        for prop_name in self.unused_props:
            print(("- %s" % prop_name))
            assert prop_name in item.properties, \
                'property %r not found in %s' % (prop_name, self.item.my_type)
            assert isinstance(item.properties[prop_name], UnusedProp)

    def test_properties_without_default(self):
        """Names in without_default must be required List/String/Integer props."""
        item = self.item
        print("Testing properties without default:")
        for prop_name in self.without_default:
            print("- %s" % prop_name)
            assert prop_name in item.properties, \
                'property %r not found in %s' % (prop_name, self.item.my_type)
            assert isinstance(item.properties[prop_name], (ListProp, StringProp, IntegerProp)), \
                'property %r is not `ListProp` or `StringProp` but %r' % (prop_name, item.properties[prop_name])
            assert item.properties[prop_name].required, 'property %r is required' % prop_name

    def test_default_values(self):
        """Every name/value pair in properties must match the declared default."""
        item = self.item
        print("Testing properties with default:")
        for prop_name, expected in self.properties.items():
            print("- %s=%s" % (prop_name, expected))
            assert prop_name in item.properties, \
                'property %r not found in %s' % (prop_name, self.item.my_type)
            prop = item.properties[prop_name]
            if not hasattr(prop, 'default'):
                continue
            if prop.unused:
                continue
            if prop.default != expected:
                print("Bad default value for %s, got: '%s', expected: '%s'"
                      % (prop_name, expected, prop.default))
            assert prop.default == expected, \
                "Default value %s for %s is not correct" % (prop_name, expected)

    def test_all_props_are_tested(self):
        """Every declared property (except $MACRO$ names) appears in one list."""
        item = self.item
        prop_names = set(list(self.properties.keys()) + self.unused_props + self.without_default)
        print("Testing all properties are tested:")
        print("- list: %s" % prop_names)
        for prop_name in item.properties:
            # Macro definitions ($X$) are not part of the tested property set
            if prop_name.startswith('$') and prop_name.endswith('$'):
                continue
            print("- %s" % prop_name)
            assert prop_name in prop_names, 'unknown property %r found' % prop_name
|
class PropertiesTester(object):
    # NOTE(review): auto-generated stub skeleton - all bodies are ``pass``.
    def test_unused_properties(self):
        pass
    def test_properties_without_default(self):
        pass
    def test_default_values(self):
        pass
    def test_all_props_are_tested(self):
        pass
| 5 | 0 | 11 | 0 | 11 | 1 | 3 | 0.09 | 1 | 6 | 4 | 20 | 4 | 0 | 4 | 4 | 48 | 5 | 43 | 14 | 38 | 4 | 37 | 14 | 32 | 5 | 1 | 4 | 12 |
4,015 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/dependencynode.py
|
alignak.dependencynode.DependencyNodeFactory
|
class DependencyNodeFactory(object):
"""DependencyNodeFactory provides dependency node parsing functions
"""
host_flags = "grlt"
service_flags = "grl"
    def __init__(self, bound_item):
        """Remember the item this factory's business rules are bound to.

        ``bound_item.host_name`` is used as the implicit host when an
        expression leaves the host part empty (see find_object /
        expand_expression).
        """
        self.bound_item = bound_item
def eval_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
"""Parse and build recursively a tree of DependencyNode from pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
pattern = pattern.strip()
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()&|':
if char in pattern:
complex_node = True
# If it's a simple node, evaluate it directly
if complex_node is False:
return self.eval_simple_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running)
return self.eval_complex_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running)
    @staticmethod
    def eval_xof_pattern(node, pattern):
        """Parse a X of pattern

        * Set is_of_mul attribute
        * Set of_values attribute

        :param node: node to edit
        :type node:
        :param pattern: line to match
        :type pattern: str
        :return: end of the line (without X of :)
        :rtype: str
        """
        # Matches "2 of: ...", "50% of: ..." or the multiple form "2,1,1 of: ...";
        # negative values and percentages are accepted for each of the 3 slots
        xof_pattern = r"^(-?\d+%?),*(-?\d*%?),*(-?\d*%?) *of: *(.+)"
        regex = re.compile(xof_pattern)
        matches = regex.search(pattern)
        if matches is not None:
            node.operand = 'of:'
            groups = matches.groups()
            # We can have a Aof: rule, or a multiple A,B,Cof: rule.
            mul_of = (groups[1] != '' and groups[2] != '')
            # If multi got (A,B,C)
            # NOTE(review): the 3 values are presumably per-state thresholds
            # consumed by DependencyNode - confirm against DependencyNode
            if mul_of:
                node.is_of_mul = True
                node.of_values = (groups[0], groups[1], groups[2])
            else:  # if not, use A,0,0, we will change 0 after to put MAX
                node.of_values = (groups[0], '0', '0')
            # Return only the remainder after "of:" for further parsing
            pattern = matches.groups()[3]
        return pattern
    def eval_complex_cor_pattern(self, pattern, hosts, services,
                                 hostgroups, servicegroups, running=False):
        # pylint: disable=too-many-branches
        """Parse and build recursively a tree of DependencyNode from a complex pattern

        Character-by-character parser: parenthesised sub-expressions are
        accumulated verbatim into ``tmp`` and recursively handed to
        eval_cor_pattern when their closing parenthesis is reached.

        :param pattern: pattern to parse
        :type pattern: str
        :param hosts: hosts list, used to find a specific host
        :type hosts: alignak.objects.host.Host
        :param services: services list, used to find a specific service
        :type services: alignak.objects.service.Service
        :param running: rules are evaluated at run time and parsing. True means runtime
        :type running: bool
        :return: root node of parsed tree
        :rtype: alignak.dependencynode.DependencyNode
        """
        node = DependencyNode()
        pattern = self.eval_xof_pattern(node, pattern)

        # Parser state
        in_par = False           # inside a parenthesised sub-expression?
        tmp = ''                 # characters of the operand being accumulated
        son_is_not = False  # We keep is the next son will be not or not
        stacked_parenthesis = 0  # current '(' nesting depth
        for char in pattern:
            if char == '(':
                stacked_parenthesis += 1
                in_par = True
                tmp = tmp.strip()
                # Maybe we just start a par, but we got some things in tmp
                # that should not be good in fact !
                if stacked_parenthesis == 1 and tmp != '':
                    # TODO : real error
                    print("ERROR : bad expression near '%s'" % tmp)
                    continue

                # If we are already in a par, add this (
                # but not if it's the first one so
                if stacked_parenthesis > 1:
                    tmp += char

            elif char == ')':
                stacked_parenthesis -= 1
                if stacked_parenthesis < 0:
                    # TODO : real error
                    print("Error : bad expression near '%s' too much ')'" % tmp)
                    continue

                if stacked_parenthesis == 0:
                    # Outermost parenthesis closed: parse the collected
                    # sub-expression as a son node
                    tmp = tmp.strip()
                    son = self.eval_cor_pattern(tmp, hosts, services,
                                                hostgroups, servicegroups, running)
                    # Maybe our son was notted
                    if son_is_not:
                        son.not_value = True
                        son_is_not = False
                    node.sons.append(son)
                    in_par = False
                    # OK now clean the tmp so we start clean
                    tmp = ''
                    continue

                # ok here we are still in a huge par, we just close one sub one
                tmp += char
            # Expressions in par will be parsed in a sub node after. So just
            # stack pattern
            elif in_par:
                tmp += char

            # Until here, we're not in par

            # Manage the NOT for an expression. Only allow ! at the beginning
            # of a host or a host,service expression.
            elif char == '!':
                tmp = tmp.strip()
                if tmp and tmp[0] != '!':
                    print("Error : bad expression near '%s', wrong position for '!'" % tmp)
                    continue
                # Flags next node not state
                son_is_not = True
                # DO NOT keep the c in tmp, we consumed it

            elif char in ['&', '|']:
                # Oh we got a real cut in an expression, if so, cut it
                tmp = tmp.strip()
                # Look at the rule viability: mixing '&' and '|' at the same
                # level (without 'of:') is rejected
                if node.operand is not None and node.operand != 'of:' and char != node.operand:
                    # Should be logged as a warning / info? :)
                    return None

                if node.operand != 'of:':
                    node.operand = char
                if tmp != '':
                    son = self.eval_cor_pattern(tmp, hosts, services,
                                                hostgroups, servicegroups, running)
                    # Maybe our son was notted
                    if son_is_not:
                        son.not_value = True
                        son_is_not = False
                    node.sons.append(son)
                tmp = ''

            # Maybe it's a classic character or we're in par, if so, continue
            else:
                tmp += char

        # Be sure to manage the trainling part when the line is done
        tmp = tmp.strip()
        if tmp != '':
            son = self.eval_cor_pattern(tmp, hosts, services,
                                        hostgroups, servicegroups, running)
            # Maybe our son was notted
            if son_is_not:
                son.not_value = True
                son_is_not = False
            node.sons.append(son)

        # We got our nodes, so we can update 0 values of of_values
        # with the number of sons
        node.switch_zeros_of_values()

        return node
    def eval_simple_cor_pattern(self, pattern, hosts, services,
                                hostgroups, servicegroups, running=False):
        """Parse and build recursively a tree of DependencyNode from a simple pattern

        :param pattern: pattern to parse
        :type pattern: str
        :param hosts: hosts list, used to find a specific host
        :type hosts: alignak.objects.host.Host
        :param services: services list, used to find a specific service
        :type services: alignak.objects.service.Service
        :param running: rules are evaluated at run time and parsing. True means runtime
        :type running: bool
        :return: root node of parsed tree
        :rtype: alignak.dependencynode.DependencyNode
        """
        node = DependencyNode()
        pattern = self.eval_xof_pattern(node, pattern)

        # If it's a not value, tag the node and find
        # the name without this ! operator
        if pattern.startswith('!'):
            node.not_value = True
            pattern = pattern[1:]
        # Is the pattern an expression to be expanded?
        # Leading "<flags>:" or "*:" on the host part, or a trailing flagged
        # service part, means group/regex/label expansion is required
        if re.search(r"^([%s]+|\*):" % self.host_flags, pattern) or \
                re.search(r",\s*([%s]+:.*|\*)$" % self.service_flags, pattern):
            # o is just extracted its attributes, then trashed.
            son = self.expand_expression(pattern, hosts, services,
                                         hostgroups, servicegroups, running)
            if node.operand != 'of:':
                node.operand = '&'
            node.sons.extend(son.sons)
            node.configuration_errors.extend(son.configuration_errors)
            node.switch_zeros_of_values()
        else:
            node.operand = 'object'
            obj, error = self.find_object(pattern, hosts, services)
            # here we have Alignak SchedulingItem object (Host/Service)
            if obj is not None:
                # Set host or service
                # pylint: disable=E1101
                node.operand = obj.__class__.my_type
                node.sons.append(obj.uuid)  # Only store the uuid, not the full object.
            else:
                if running is False:
                    node.configuration_errors.append(error)
                else:
                    # As business rules are re-evaluated at run time on
                    # each scheduling loop, if the rule becomes invalid
                    # because of a badly written macro modulation, it
                    # should be notified upper for the error to be
                    # displayed in the check output.
                    raise Exception(error)
        return node
def find_object(self, pattern, hosts, services):
"""Find object from pattern
:param pattern: text to search (host1,service1)
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:return: tuple with Host or Service object and error
:rtype: tuple
"""
obj = None
error = None
is_service = False
# h_name, service_desc are , separated
elts = pattern.split(',')
host_name = elts[0].strip()
# If host_name is empty, use the host_name the business rule is bound to
if not host_name:
host_name = self.bound_item.host_name
# Look if we have a service
if len(elts) > 1:
is_service = True
service_description = elts[1].strip()
if is_service:
obj = services.find_srv_by_name_and_hostname(host_name, service_description)
if not obj:
error = "business rule uses unknown service %s/%s"\
% (host_name, service_description)
else:
obj = hosts.find_by_name(host_name)
if not obj:
error = "business rule uses unknown host %s" % (host_name,)
return obj, error
    def expand_expression(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
        # pylint: disable=too-many-locals
        """Expand a host or service expression into a dependency node tree
        using (host|service)group membership, regex, or labels as item selector.

        :param pattern: pattern to parse
        :type pattern: str
        :param hosts: hosts list, used to find a specific host
        :type hosts: alignak.objects.host.Host
        :param services: services list, used to find a specific service
        :type services: alignak.objects.service.Service
        :param running: rules are evaluated at run time and parsing. True means runtime
        :type running: bool
        :return: root node of parsed tree
        :rtype: alignak.dependencynode.DependencyNode
        """
        error = None
        node = DependencyNode()
        node.operand = '&'
        elts = [e.strip() for e in pattern.split(',')]
        # If host_name is empty, use the host_name the business rule is bound to
        if not elts[0]:
            elts[0] = self.bound_item.host_name
        filters = []
        # Looks for hosts/services using appropriate filters
        try:
            all_items = {
                "hosts": hosts,
                "hostgroups": hostgroups,
                "servicegroups": servicegroups
            }
            if len(elts) > 1:
                # We got a service expression
                # NOTE(review): assumes exactly two comma-separated parts here;
                # a pattern with more than one comma would raise ValueError on
                # this unpacking - confirm upstream validation
                host_expr, service_expr = elts
                filters.extend(self.get_srv_host_filters(host_expr))
                filters.extend(self.get_srv_service_filters(service_expr))
                items = services.find_by_filter(filters, all_items)
            else:
                # We got a host expression
                host_expr = elts[0]
                filters.extend(self.get_host_filters(host_expr))
                items = hosts.find_by_filter(filters, all_items)
        except re.error as regerr:
            error = "business rule uses invalid regex %s: %s" % (pattern, regerr)
        else:
            # No exception: an empty selection is also an error
            if not items:
                error = "business rule got an empty result for pattern '%s'" % pattern

        # Checks if we got result
        if error:
            if running is False:
                node.configuration_errors.append(error)
            else:
                # As business rules are re-evaluated at run time on
                # each scheduling loop, if the rule becomes invalid
                # because of a badly written macro modulation, it
                # should be notified upper for the error to be
                # displayed in the check output.
                raise Exception(error)
            return node

        # Creates dependency node subtree
        # here we have Alignak SchedulingItem object (Host/Service)
        for item in items:
            # Creates a host/service node
            son = DependencyNode()
            son.operand = item.__class__.my_type
            son.sons.append(item.uuid)  # Only store the uuid, not the full object.
            # Appends it to wrapping node
            node.sons.append(son)
        node.switch_zeros_of_values()
        return node
def get_host_filters(self, expr):
    """Build the host filter list for *expr*.

    Supported forms::

        * '*'            => any host
        * 'g:<expr>'     => hostgroup membership filter
        * 'r:<expr>'     => host name regex filter
        * 'l:<expr>'     => business rule label filter
        * 't:<expr>'     => host tag filter
        * '<flags>:' with no recognized flag => none filter
        * no flag prefix => exact host name filter

    :param expr: expression to parse
    :type expr: str
    :return: filter list
    :rtype: list
    """
    # A bare "*" matches every host
    if expr == "*":
        return [filter_any]

    # Split an optional "<flags>:" prefix from the expression body
    flag_match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
    if flag_match is None:
        # No flag prefix: plain host name lookup
        return [filter_host_by_name(expr)]

    flags, body = flag_match.groups()
    # The first matching flag wins; precedence is fixed: g, r, l, t
    for flag, make_filter in (("g", filter_host_by_group),
                              ("r", filter_host_by_regex),
                              ("l", filter_host_by_bp_rule_label),
                              ("t", filter_host_by_tag)):
        if flag in flags:
            return [make_filter(body)]

    # Flag prefix present but no recognized flag: match nothing
    return [filter_none]
def get_srv_host_filters(self, expr):
    """Build the host-side filter list for a service expression.

    Supported forms::

        * '*'            => any
        * 'g:<expr>'     => hostgroup filter
        * 'r:<expr>'     => host regex name filter
        * 'l:<expr>'     => host bp rule label filter
        * 't:<expr>'     => host tag filter
        * '<flags>:' with no recognized flag => none filter
        * no flag prefix => exact host name filter

    :param expr: expression to parse
    :type expr: str
    :return: filter list
    :rtype: list
    """
    # A bare "*" matches services on any host
    if expr == "*":
        return [filter_any]

    # Split an optional "<flags>:" prefix from the expression body
    flag_match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
    if flag_match is None:
        # No flag prefix: match services on their exact host name
        return [filter_service_by_host_name(expr)]

    flags, body = flag_match.groups()
    # The first matching flag wins; precedence is fixed: g, r, l, t
    for flag, make_filter in (("g", filter_service_by_hostgroup_name),
                              ("r", filter_service_by_regex_host_name),
                              ("l", filter_service_by_host_bp_rule_label),
                              ("t", filter_service_by_host_tag_name)):
        if flag in flags:
            return [make_filter(body)]

    # Flag prefix present but no recognized flag: match nothing
    return [filter_none]
def get_srv_service_filters(self, expr):
    """Build the service filter list for *expr*.

    Supported forms::

        * '*'            => any service
        * 'g:<expr>'     => servicegroup filter
        * 'r:<expr>'     => service regex name filter
        * 'l:<expr>'     => service bp rule label filter
        * '<flags>:' with no recognized flag => none filter
        * no flag prefix => exact service name filter

    Note: unlike the host expression flags, no tag ('t') filter is
    implemented here; a flag prefix without g/r/l yields the none filter.

    :param expr: expression to parse
    :type expr: str
    :return: filter list
    :rtype: list
    """
    # A bare "*" matches every service
    if expr == "*":
        return [filter_any]

    # Split an optional "<flags>:" prefix from the expression body
    flag_match = re.search(r"^([%s]+):(.*)" % self.service_flags, expr)
    if flag_match is None:
        # No flag prefix: plain service name lookup
        return [filter_service_by_name(expr)]

    flags, body = flag_match.groups()
    # The first matching flag wins; precedence is fixed: g, r, l
    for flag, make_filter in (("g", filter_service_by_servicegroup_name),
                              ("r", filter_service_by_regex_name),
                              ("l", filter_service_by_bp_rule_label)):
        if flag in flags:
            return [make_filter(body)]

    # Flag prefix present but no recognized flag: match nothing
    return [filter_none]
|
class DependencyNodeFactory(object):
'''DependencyNodeFactory provides dependency node parsing functions
'''
def __init__(self, bound_item):
pass
def eval_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
'''Parse and build recursively a tree of DependencyNode from pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
'''
pass
@staticmethod
def eval_xof_pattern(node, pattern):
'''Parse a X of pattern
* Set is_of_mul attribute
* Set of_values attribute
:param node: node to edit
:type node:
:param pattern: line to match
:type pattern: str
:return: end of the line (without X of :)
:rtype: str
'''
pass
def eval_complex_cor_pattern(self, pattern, hosts, services,
hostgroups, servicegroups, running=False):
'''Parse and build recursively a tree of DependencyNode from a complex pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
'''
pass
def eval_simple_cor_pattern(self, pattern, hosts, services,
hostgroups, servicegroups, running=False):
'''Parse and build recursively a tree of DependencyNode from a simple pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
'''
pass
def find_object(self, pattern, hosts, services):
'''Find object from pattern
:param pattern: text to search (host1,service1)
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:return: tuple with Host or Service object and error
:rtype: tuple
'''
pass
def expand_expression(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
'''Expand a host or service expression into a dependency node tree
using (host|service)group membership, regex, or labels as item selector.
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
'''
pass
def get_host_filters(self, expr):
'''Generates host filter list corresponding to the expression ::
* '*' => any
* 'g' => group filter
* 'r' => regex name filter
* 'l' => bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => host name filter
:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
'''
pass
def get_srv_host_filters(self, expr):
'''Generates service filter list corresponding to the expression ::
* '*' => any
* 'g' => hostgroup filter
* 'r' => host regex name filter
* 'l' => host bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => host name filter
:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
'''
pass
def get_srv_service_filters(self, expr):
'''Generates service filter list corresponding to the expression ::
* '*' => any
* 'g' => servicegroup filter
* 'r' => service regex name filter
* 'l' => service bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => service name filter
:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
'''
pass
| 12 | 10 | 45 | 4 | 23 | 18 | 7 | 0.75 | 1 | 2 | 1 | 0 | 9 | 1 | 10 | 10 | 465 | 55 | 237 | 56 | 223 | 177 | 210 | 52 | 199 | 19 | 1 | 4 | 67 |
4,016 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/dispatcher.py
|
alignak.dispatcher.Dispatcher
|
class Dispatcher(object):
# pylint: disable=too-many-instance-attributes
"""Dispatcher is in charge of sending configuration to other daemon.
It has to handle spare, realms, poller tags etc.
"""
def __init__(self, conf, arbiter_link):
    # pylint: disable=too-many-branches
    """Initialize the dispatcher

    Note that the arbiter param is an ArbiterLink, not an Arbiter daemon. Thus it is only
    an interface to the running Arbiter daemon...

    :param conf: the whole Alignak configuration as parsed by the Arbiter
    :type conf: Config
    :param arbiter_link: the link to the arbiter that parsed this configuration
    :type arbiter_link: ArbiterLink
    :raises DispatcherError: if no arbiter link is provided, if the configuration has
    no `parts` attribute, or if more than one master (non spare) arbiter is declared
    """
    if not arbiter_link or not hasattr(conf, 'parts'):
        raise DispatcherError("Dispatcher configuration problem: "
                              "no valid arbiter link or configuration!")

    # One list per daemon type; they are filled from the pushed configuration below
    self.arbiters = []
    self.schedulers = []
    self.reactionners = []
    self.pollers = []
    self.brokers = []
    self.receivers = []
    # List the satellites that are not configured
    self.not_configured = []

    # Direct pointer to important elements for us
    self.arbiter_link = arbiter_link
    self.alignak_conf = conf
    self.global_conf = {}
    # Get configuration data from the pushed configuration
    # Only keep the properties that are meant to be broked with a full status brok
    cls = self.alignak_conf.__class__
    for prop, entry in list(cls.properties.items()):
        # Is this property intended for broking?
        if FULL_STATUS not in entry.fill_brok:
            continue
        self.global_conf[prop] = self.alignak_conf.get_property_value_for_brok(
            prop, cls.properties)
        # self.global_conf[prop] = getattr(self.alignak_conf, prop, entry.default)
    logger.debug("Dispatcher configuration: %s / %s", self.arbiter_link, self.alignak_conf)
    logger.debug("Dispatcher global configuration: %s", self.global_conf)

    logger.info("Dispatcher realms configuration:")
    for realm in self.alignak_conf.realms:
        logger.info("- %s:", realm.name)
        for cfg_part in list(realm.parts.values()):
            logger.info(" .%s (%s), flavor:%s, %s",
                        cfg_part.instance_id, cfg_part.uuid, cfg_part.push_flavor, cfg_part)

    logger.debug("Dispatcher satellites configuration:")
    for sat_type in ['arbiters', 'schedulers', 'reactionners',
                     'brokers', 'receivers', 'pollers']:
        setattr(self, sat_type, getattr(self.alignak_conf, sat_type))

        # for each satellite, we look if current arbiter have a specific
        # satellite map value set for this satellite.
        # if so, we give this map to the satellite (used to build satellite URI later)
        for satellite in getattr(self, sat_type):
            logger.debug(" . %s", satellite)
            satellite.set_arbiter_satellite_map(
                self.arbiter_link.satellite_map.get(satellite.name, {}))

    logger.info("Dispatcher arbiters/satellites map:")
    for sat_type in ['arbiters', 'schedulers', 'reactionners',
                     'brokers', 'receivers', 'pollers']:
        for satellite in getattr(self, sat_type):
            logger.info("- %s: %s", satellite.name, satellite.uri)

    # Sanity check: there must be exactly one master arbiter (me); every other
    # declared arbiter has to be a spare
    for link in self.get_satellites_list('arbiters'):
        # If not me and a spare arbiter...
        if link == self.arbiter_link:
            # I exclude myself from the dispatching, I have my configuration ;)
            continue

        # WTF, there is another master in my configuration!!!
        if not link.spare:
            raise DispatcherError("There is more than one master arbiter (%s) in "
                                  "the configuration. This is not acceptable!" % arbiter_link)

    # Add satellites in a list (all daemons except arbiters and schedulers)
    self.satellites = []
    self.satellites.extend(self.reactionners)
    self.satellites.extend(self.pollers)
    self.satellites.extend(self.brokers)
    self.satellites.extend(self.receivers)

    # all elements, including schedulers and satellites
    self.all_daemons_links = []
    self.all_daemons_links.extend(self.reactionners)
    self.all_daemons_links.extend(self.pollers)
    self.all_daemons_links.extend(self.brokers)
    self.all_daemons_links.extend(self.receivers)
    self.all_daemons_links.extend(self.schedulers)
    self.all_daemons_links.extend(self.arbiters)

    # All daemon links initially need to have a configuration
    for daemon_link in self.all_daemons_links:
        # We do not need a configuration :)
        if daemon_link == self.arbiter_link:
            continue
        daemon_link.need_conf = True

    # Some flag about dispatch needed or not
    self.dispatch_ok = False
    self.new_to_dispatch = False
    self.first_dispatch_done = False

    self.stop_request_sent = False

    # Prepare the satellites configurations
    for satellite in self.all_daemons_links:
        satellite.prepare_for_conf()
def check_reachable(self, forced=False, test=False):
    # pylint: disable=too-many-branches
    """Check all daemons state (reachable or not)

    If test parameter is True, do not really send but simulate only for testing purpose...

    The update_infos function returns None when no ping has been executed
    (too early...), or True / False according to the real ping and get managed
    configuration result. So, if the result is None, consider as not valid,
    else compute the global result...

    Side effects: fills ``self.not_configured`` with the daemons lacking a managed
    configuration and may trigger a (re-)dispatch when a configuration is pending.

    :param forced: force the daemon information refresh
    :type forced: bool
    :param test: simulate the daemon connection (unit tests only)
    :type test: bool
    :return: True if all daemons are reachable
    :rtype: bool
    """
    all_ok = True
    self.not_configured = []
    for daemon_link in self.all_daemons_links:
        if daemon_link == self.arbiter_link:
            # I exclude myself from the polling, sure I am reachable ;)
            continue

        if not daemon_link.active:
            # I exclude the daemons that are not active
            continue

        if test:
            # Unit tests: set the satellite as alive without any network exchange
            daemon_link.set_alive()
            daemon_link.running_id = time.time()

        # Force the daemon communication only if a configuration is prepared
        result = False
        try:
            result = daemon_link.update_infos(forced=(forced or self.new_to_dispatch),
                                              test=test)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get fresh information.")

        if result is False:
            # Got a timeout! The daemon is not reachable
            # Fix: formerly `all_ok` was never updated, so this function always
            # returned True even when some daemons timed-out.
            all_ok = False
            self.not_configured.append(daemon_link)
            continue

        if result is None:
            # Come back later ... too recent daemon connection!
            continue

        if result:
            # Got a managed configuration
            logger.debug("The %s '%s' manages %s",
                         daemon_link.type, daemon_link.name, daemon_link.cfg_managed)
            if not self.first_dispatch_done:
                # I just (re)started the arbiter
                self.not_configured.append(daemon_link)
        else:
            # No managed configuration - a new dispatching is necessary but only
            # if we already dispatched a configuration
            # Probably a freshly restarted daemon ;)
            logger.debug("The %s %s do not have a configuration",
                         daemon_link.type, daemon_link.name)
            # the daemon is not yet configured
            self.not_configured.append(daemon_link)
            daemon_link.configuration_sent = False

    if self.not_configured and self.new_to_dispatch and not self.first_dispatch_done:
        logger.info("Dispatcher, these daemons are not configured: %s, "
                    "and a configuration is ready to dispatch, run the dispatching...",
                    ','.join(d.name for d in self.not_configured))
        self.dispatch_ok = False
        self.dispatch(test=test)

    elif self.not_configured and self.first_dispatch_done:
        logger.info("Dispatcher, these daemons are not configured: %s, "
                    "and a configuration has yet been dispatched dispatch, "
                    "a new dispatch is required...",
                    ','.join(d.name for d in self.not_configured))
        self.dispatch_ok = False
        # Avoid exception because dispatch is not accepted!
        self.new_to_dispatch = True
        self.first_dispatch_done = False
        self.dispatch(test=test)

    return all_ok
def check_status_and_get_events(self):
    # pylint: disable=too-many-branches
    """Get the statistics and the events of all the active daemons

    Each daemon's statistics are stored on its link object (`daemon_link.statistics`)
    with an added `_freshness` timestamp; the events collected from all the daemons
    are accumulated and returned.

    Fix: the former docstring pretended a statistics dictionary was returned,
    but the function has always returned the collected events list; the dead
    local statistics dictionary has been removed.

    :return: list of the events collected from all the daemons
    :rtype: list
    """
    events = []
    for daemon_link in self.all_daemons_links:
        if daemon_link == self.arbiter_link:
            # I exclude myself from the polling, sure I am reachable ;)
            continue

        if not daemon_link.active:
            # I exclude the daemons that are not active
            continue

        try:
            # Do not get the details to avoid overloading the communication
            daemon_link.statistics = daemon_link.get_daemon_stats(details=False)
            if daemon_link.statistics:
                daemon_link.statistics['_freshness'] = int(time.time())
                logger.debug("Daemon %s statistics: %s",
                             daemon_link.name, daemon_link.statistics)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get statistics.")

        try:
            got = daemon_link.get_events()
            if got:
                events.extend(got)
                logger.debug("Daemon %s has %d events: %s", daemon_link.name, len(got), got)
        except LinkError:
            logger.warning("Daemon connection failed, I could not get events.")

    return events
def check_dispatch(self):  # pylint: disable=too-many-branches
    """Check that all active satellites have a configuration dispatched

    A DispatcherError exception is raised if no configuration is dispatched!

    Side effects: refreshes the daemons state (forced `check_reachable`), updates
    `self.dispatch_ok` and marks the realm/scheduler/satellite dispatch bookkeeping
    for a new dispatch when an inconsistency is found.

    :return: the dispatch status (value of ``self.dispatch_ok``)
    :rtype: bool
    """
    if not self.arbiter_link:
        raise DispatcherError("Dispatcher configuration problem: no valid arbiter link!")

    if not self.first_dispatch_done:
        raise DispatcherError("Dispatcher cannot check the dispatching, "
                              "because no configuration is dispatched!")

    # We check for configuration parts to be dispatched on alive schedulers.
    # If not dispatched, we need a dispatch :) and if dispatched on a failed node,
    # remove the association, and need a new dispatch
    self.dispatch_ok = True
    some_satellites_are_missing = False

    # Get fresh information about the satellites
    logger.info("Getting fresh information")
    self.check_reachable(forced=True)

    logger.info("Checking realms dispatch:")
    for realm in self.alignak_conf.realms:
        logger.info("- realm %s:", realm.name)
        for cfg_part in list(realm.parts.values()):
            logger.info(" .configuration %s", cfg_part)

            # This should never happen, logically!
            if not cfg_part.scheduler_link:
                self.dispatch_ok = False
                logger.error("- realm %s:", realm.name)
                logger.error(" .configuration %s", cfg_part)
                logger.error(" not managed by any scheduler!")
                continue

            logger.debug(" checking scheduler %s configuration: %s",
                         cfg_part.scheduler_link.name, cfg_part.instance_id)

            # Maybe the scheduler restarts, so it is alive but without
            # the expected configuration; set the configuration part as unmanaged
            # and ask for a new configuration dispatch
            if not cfg_part.scheduler_link.manages(cfg_part):
                # We ask for a new dispatching
                self.dispatch_ok = False
                if cfg_part.scheduler_link.cfg_managed is None:
                    logger.warning(" %s not yet !.",
                                   cfg_part.scheduler_link.name)
                else:
                    logger.warning(" the assigned scheduler %s does not manage the "
                                   "configuration; asking for a new configuration dispatch.",
                                   cfg_part.scheduler_link.name)
                cfg_part.scheduler_link.cfg_to_manage = None
                cfg_part.scheduler_link.push_flavor = ''
                cfg_part.scheduler_link.hash = ''
                cfg_part.scheduler_link.need_conf = True

            for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                logger.debug(" checking %ss configuration", sat_type)
                # We must have the correct number of satellites or we are not happy
                # So we are sure to raise a dispatch every loop a satellite is missing
                if (len(realm.to_satellites_managed_by[sat_type][cfg_part.instance_id]) <
                        realm.get_nb_of_must_have_satellites(sat_type)):
                    some_satellites_are_missing = True

                    logger.warning(" missing %s satellites: %s / %s!", sat_type,
                                   realm.to_satellites_managed_by[sat_type][
                                       cfg_part.instance_id],
                                   realm.get_nb_of_must_have_satellites(sat_type))

                    # TODO: less violent! Must only resend to the one needing?
                    # must be caught by satellite who sees that
                    # it already has the conf and do nothing
                    self.dispatch_ok = False  # so we will redispatch all
                    realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
                    realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []

                # NOTE: when the branch above ran, the managed-by list is now
                # empty and this loop body is skipped
                for satellite in realm.to_satellites_managed_by[sat_type][cfg_part.instance_id]:
                    # Maybe the sat was marked as not alive, but still in
                    # to_satellites_managed_by. That means that a new dispatch
                    # is needed
                    # Or maybe it is alive but I thought that this satellite
                    # managed the conf and it doesn't.
                    # I ask a full redispatch of these cfg for both cases
                    if not satellite.reachable:
                        logger.info(" the %s %s is not reachable; "
                                    "assuming a correct configuration dispatch.",
                                    sat_type, satellite.name)
                        continue
                    # if not cfg_part.push_flavor:
                    #     logger.warning(" the %s %s manages an unmanaged configuration; "
                    #                    "asking for a new configuration dispatch.",
                    #                    sat_type, satellite.name)
                    if not satellite.manages(cfg_part):
                        logger.warning(" the %s %s does not manage "
                                       "the correct configuration; "
                                       "asking for a new configuration dispatch.",
                                       sat_type, satellite.name)
                        self.dispatch_ok = False
                        realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
                        realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []

    if some_satellites_are_missing:
        logger.warning("Some satellites are not available for the current configuration")

    return self.dispatch_ok
def get_satellites_list(self, sat_type):
    """Return the satellites of the given type, ordered master first then spare.

    An unknown *sat_type* yields an empty list.

    :param sat_type: type of the required satellites (arbiters, schedulers, ...)
    :type sat_type: str
    :return: sorted satellites list
    :rtype: list[alignak.objects.satellitelink.SatelliteLink]
    """
    known_types = ('arbiters', 'schedulers', 'reactionners',
                   'brokers', 'receivers', 'pollers')
    if sat_type not in known_types:
        return []

    # Copy the links into a plain list and order it: masters before spares
    return master_then_spare(list(getattr(self, sat_type)))
def get_scheduler_ordered_list(self, realm):
"""Get sorted scheduler list for a specific realm
List is ordered as: alive first, then spare (if any), then dead scheduler links
:param realm: realm we want scheduler from
:type realm: alignak.objects.realm.Realm
:return: sorted scheduler list
:rtype: list[alignak.objects.schedulerlink.SchedulerLink]
"""
# Get the schedulers for the required realm
scheduler_links = []
for scheduler_link_uuid in realm.schedulers:
scheduler_links.append(self.schedulers[scheduler_link_uuid])
# Now we sort the schedulers so we take alive, then spare, then dead,
alive = []
spare = []
deads = []
for sdata in scheduler_links:
if sdata.alive and not sdata.spare:
alive.append(sdata)
elif sdata.alive and sdata.spare:
spare.append(sdata)
else:
deads.append(sdata)
scheduler_links = []
scheduler_links.extend(alive)
scheduler_links.extend(spare)
scheduler_links.extend(deads)
scheduler_links.reverse() # I need to pop the list, so reverse the list...
return scheduler_links
def prepare_dispatch(self):
    # pylint:disable=too-many-branches, too-many-statements, too-many-locals
    """
    Prepare dispatch, so prepare for each daemon (schedulers, brokers, receivers, reactionners,
    pollers)

    This function will only prepare something if self.new_to_dispatch is False
    It will reset the first_dispatch_done flag

    A DispatcherError exception is raised if a configuration is already prepared! Unset the
    new_to_dispatch flag before calling!

    :return: None
    """
    if self.new_to_dispatch:
        raise DispatcherError("A configuration is already prepared!")

    # So we are preparing a new dispatching...
    self.new_to_dispatch = True
    self.first_dispatch_done = False

    # Update Alignak name for all the satellites
    for daemon_link in self.all_daemons_links:
        daemon_link.cfg.update({'alignak_name': self.alignak_conf.alignak_name})

    logger.info("Preparing realms dispatch:")

    # Prepare the arbiters configuration
    master_arbiter_cfg = arbiters_cfg = {}
    for arbiter_link in self.get_satellites_list('arbiters'):
        # # If not me and not a spare arbiter...
        # if arbiter_link == self.arbiter_link:
        #     # I exclude myself from the dispatching, I have my configuration ;)
        #     continue

        if not arbiter_link.active:
            # I exclude the daemons that are not active
            continue

        arbiter_cfg = arbiter_link.cfg
        arbiter_cfg.update({
            'managed_hosts_names': [h.get_name() for h in self.alignak_conf.hosts],
            'modules': serialize(arbiter_link.modules, True),
            'managed_conf_id': self.alignak_conf.instance_id,
            'push_flavor': ''
        })

        # Hash the configuration
        cfg_string = serialize(arbiter_cfg, no_json=False).encode('utf-8')
        arbiter_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

        # Update the arbiters list, but do not include the whole conf
        arbiters_cfg[arbiter_link.uuid] = arbiter_cfg['self_conf']

        # Not for the master arbiter...
        if arbiter_link != self.arbiter_link:
            # A spare arbiter also gets the master arbiter map and the whole
            # (unsplit) configuration, so it can take over
            arbiter_cfg.update({
                'arbiters': master_arbiter_cfg,
                'whole_conf': self.alignak_conf.spare_arbiter_conf,
            })

            # Hash the whole configuration
            cfg_string = serialize(arbiter_cfg, no_json=False).encode('utf-8')
            arbiter_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

        # Dump the configuration part size
        pickled_conf = pickle.dumps(arbiter_cfg)
        logger.info(' arbiter configuration size: %d bytes', sys.getsizeof(pickled_conf))

        # The configuration is assigned to the arbiter
        # todo: perhaps this should be done in the realms (like schedulers and satellites)?
        arbiter_link.cfg = arbiter_cfg
        arbiter_link.cfg_to_manage = self.alignak_conf
        arbiter_link.push_flavor = arbiter_cfg['push_flavor']
        arbiter_link.hash = arbiter_cfg['hash']
        arbiter_link.need_conf = False
        arbiter_link.configuration_sent = False

        # If not me and not a spare arbiter...
        if arbiter_link == self.arbiter_link:
            # The master arbiter configuration for the other satellites
            master_arbiter_cfg = {self.arbiter_link.uuid: arbiter_cfg['self_conf']}

        logger.info(' arbiter configuration prepared for %s', arbiter_link.name)

    # main_realm = self.alignak_conf.realms.find_by_name('All')
    # all_realms = main_realm.all_sub_members
    # for realm_uuid in all_realms:
    #     realm = self.alignak_conf.realms[realm_uuid]
    #     logger.info("- realm %s: %s", realm_uuid, realm)
    for realm in self.alignak_conf.realms:
        logger.info("- realm %s: %d configuration part(s)", realm.name, len(realm.parts))

        # parts_to_dispatch is a list of configuration parts built when
        # the configuration is split into parts for the realms and their schedulers
        # Only get the parts that are not yet assigned to a scheduler
        parts_to_dispatch = [cfg for cfg in list(realm.parts.values()) if not cfg.is_assigned]
        if not parts_to_dispatch:
            logger.info(' no configuration to dispatch for this realm!')
            continue

        logger.info(" preparing the dispatch for schedulers:")

        # Now we get all the schedulers of this realm and upper
        # schedulers = self.get_scheduler_ordered_list(realm)
        schedulers = realm.get_potential_satellites_by_type(
            self.get_satellites_list('schedulers'), 'scheduler')
        if not schedulers:
            logger.error(' no available schedulers in this realm (%s)!', realm)
            continue
        logger.info(" realm schedulers: %s",
                    ','.join([s.get_name() for s in schedulers]))

        for cfg_part in parts_to_dispatch:
            logger.info(" .assigning configuration part %s (%s), name:%s",
                        cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)

            # we need to loop until the configuration part is assigned to a scheduler
            # or no more scheduler is available
            while True:
                try:
                    scheduler_link = schedulers.pop()
                except IndexError:  # No more schedulers.. not good, no loop
                    # The configuration part do not need to be dispatched anymore
                    # todo: should be managed inside the Realm class!
                    logger.error("No more scheduler link: %s", realm)
                    for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                        realm.to_satellites[sat_type][cfg_part.instance_id] = None
                        realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = \
                            False
                        realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
                    break

                # if scheduler_link.manage_sub_realms:
                #     logger.warning('[%s] The scheduler %s is configured to manage sub realms.'
                #                    ' This is not yet possible, sorry!',
                #                    realm.name, scheduler_link.name)
                #     scheduler_link.manage_sub_realms = False
                #     continue

                if not scheduler_link.need_conf:
                    logger.info('[%s] The scheduler %s do not need any configuration, sorry',
                                realm.name, scheduler_link.name)
                    continue

                logger.debug(" preparing configuration part '%s' for the scheduler '%s'",
                             cfg_part.instance_id, scheduler_link.name)
                logger.info(" - %d hosts, %d services",
                            len(cfg_part.hosts), len(cfg_part.services))
                logger.info(" - %d host templates, %d service templates",
                            len(cfg_part.hosts.templates), len(cfg_part.services.templates))

                # Serialization and hashing
                s_conf_part = serialize(realm.parts[cfg_part.instance_id], no_json=False)
                try:
                    s_conf_part = s_conf_part.encode('utf-8')
                except UnicodeDecodeError:
                    pass
                cfg_part.push_flavor = hashlib.sha1(s_conf_part).hexdigest()

                # We generate the scheduler configuration for the satellites:
                # ---
                sat_scheduler_cfg = scheduler_link.give_satellite_cfg()
                sat_scheduler_cfg.update({
                    'managed_hosts_names': [h.get_name() for h in cfg_part.hosts],
                    'managed_conf_id': cfg_part.instance_id,
                    'push_flavor': cfg_part.push_flavor
                })
                # Generate a configuration hash
                cfg_string = serialize(sat_scheduler_cfg, no_json=False).encode('utf-8')
                sat_scheduler_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

                logger.debug(' satellite scheduler configuration: %s', sat_scheduler_cfg)
                for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                    realm.to_satellites[sat_type][cfg_part.instance_id] = sat_scheduler_cfg
                    realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = True
                    realm.to_satellites_managed_by[sat_type][cfg_part.instance_id] = []
                # ---

                scheduler_link.cfg.update({
                    # Global instance configuration
                    'instance_id': scheduler_link.instance_id,
                    'instance_name': scheduler_link.name,
                    'schedulers': {scheduler_link.uuid: sat_scheduler_cfg},
                    'arbiters': arbiters_cfg if scheduler_link.manage_arbiters else {},
                    'satellites': realm.get_links_for_a_scheduler(self.pollers,
                                                                  self.reactionners,
                                                                  self.brokers),
                    'modules': serialize(scheduler_link.modules, True),
                    'conf_part': serialize(realm.parts[cfg_part.instance_id]),
                    'managed_conf_id': cfg_part.instance_id,
                    'push_flavor': cfg_part.push_flavor,
                    'override_conf': scheduler_link.get_override_configuration()
                })

                # Hash the whole configuration
                cfg_string = serialize(scheduler_link.cfg, no_json=False).encode('utf-8')
                scheduler_link.cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

                # Dump the configuration part size
                pickled_conf = pickle.dumps(scheduler_link.cfg)
                logger.info(" scheduler configuration size: %d bytes",
                            sys.getsizeof(pickled_conf))

                logger.info(" scheduler satellites:")
                satellites = realm.get_links_for_a_scheduler(self.pollers,
                                                             self.reactionners,
                                                             self.brokers)
                for sat_type in satellites:
                    logger.info(" - %s", sat_type)
                    for sat_link_uuid in satellites[sat_type]:
                        satellite = satellites[sat_type][sat_link_uuid]
                        logger.info(" %s", satellite['name'])

                # The configuration part is assigned to a scheduler
                cfg_part.is_assigned = True
                cfg_part.scheduler_link = scheduler_link
                scheduler_link.cfg_to_manage = cfg_part
                scheduler_link.push_flavor = cfg_part.push_flavor
                scheduler_link.hash = scheduler_link.cfg['hash']
                scheduler_link.need_conf = False
                scheduler_link.configuration_sent = False

                logger.info(' configuration %s (%s) assigned to %s',
                            cfg_part.instance_id, cfg_part.push_flavor, scheduler_link.name)

                # The configuration part is assigned to a scheduler, no need to go further ;)
                break

        logger.info(" preparing the dispatch for satellites:")
        for cfg_part in list(realm.parts.values()):
            logger.info(" .configuration part %s (%s), name:%s",
                        cfg_part.instance_id, cfg_part.uuid, cfg_part.config_name)
            for sat_type in ('reactionner', 'poller', 'broker', 'receiver'):
                # NOTE(review): the two `return` statements below abort the WHOLE
                # preparation (remaining satellite types, remaining realms and the
                # final bookkeeping are skipped) - confirm whether `continue` was
                # intended here.
                if cfg_part.instance_id not in realm.to_satellites_need_dispatch[sat_type]:
                    logger.warning(" nothing to dispatch for %ss", sat_type)
                    return

                if not realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id]:
                    logger.warning(" no need to dispatch to %ss", sat_type)
                    return

                # Get the list of the concerned satellites
                satellites = realm.get_potential_satellites_by_type(self.satellites, sat_type)
                if satellites:
                    logger.info(" realm %ss: %s",
                                sat_type, ','.join([s.get_name() for s in satellites]))
                else:
                    logger.info(" no %s satellites", sat_type)

                # Now we dispatch cfg to every one ask for it
                nb_cfg_prepared = 0
                for sat_link in satellites:
                    if not sat_link.active:
                        # I exclude the daemons that are not active
                        continue

                    if nb_cfg_prepared > realm.get_nb_of_must_have_satellites(sat_type):
                        logger.warning("Too much configuration parts prepared "
                                       "for the expected satellites count. "
                                       "Realm: %s, satellite: %s - prepared: %d out of %d",
                                       realm.name, sat_link.name, nb_cfg_prepared,
                                       realm.get_nb_of_must_have_satellites(sat_type))
                        # Fred - 2018-07-20 - temporary disable this error raising!
                        # raise DispatcherError("Too much configuration parts prepared "
                        #                       "for the expected satellites count. "
                        #                       "This should never happen!")

                    logger.info(" preparing configuration part '%s' for the %s '%s'",
                                cfg_part.instance_id, sat_type, sat_link.name)

                    sat_link.cfg.update({
                        # Global instance configuration
                        'arbiters': arbiters_cfg if sat_link.manage_arbiters else {},
                        'modules': serialize(sat_link.modules, True),
                        'managed_conf_id': 'see_my_schedulers',
                        'global_conf': self.global_conf
                    })
                    sat_link.cfg['schedulers'].update({
                        cfg_part.uuid: realm.to_satellites[sat_type][cfg_part.instance_id]})

                    # Brokers should have pollers and reactionners links too
                    if sat_type == "broker":
                        sat_link.cfg.update({'satellites': realm.get_links_for_a_broker(
                            self.pollers, self.reactionners, self.receivers,
                            self.alignak_conf.realms, sat_link.manage_sub_realms)})

                    # Hash the whole configuration
                    cfg_string = serialize(sat_link.cfg, no_json=False).encode('utf-8')
                    sat_link.cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

                    # Dump the configuration part size
                    pickled_conf = pickle.dumps(sat_link.cfg)
                    logger.info(' %s configuration size: %d bytes',
                                sat_type, sys.getsizeof(pickled_conf))

                    # The configuration part is assigned to a satellite
                    sat_link.cfg_to_manage = cfg_part
                    sat_link.push_flavor = cfg_part.push_flavor
                    sat_link.hash = sat_link.cfg['hash']
                    sat_link.need_conf = False
                    sat_link.configuration_sent = False

                    logger.info(' configuration %s (%s) assigned to %s',
                                cfg_part.instance_id, cfg_part.push_flavor, sat_link.name)

                    nb_cfg_prepared += 1
                    realm.to_satellites_managed_by[sat_type][
                        cfg_part.instance_id].append(sat_link)

                    # I've got enough satellite, the next ones are considered unuseful!
                    if nb_cfg_prepared == realm.get_nb_of_must_have_satellites(sat_type):
                        logger.info(" no more %s needed in this realm.", sat_type)
                        realm.to_satellites_need_dispatch[sat_type][
                            cfg_part.instance_id] = False

    nb_missed = len([cfg for cfg in list(
        self.alignak_conf.parts.values()) if not cfg.is_assigned])
    if nb_missed > 0:
        logger.warning("Some configuration parts are not dispatched, %d are missing", nb_missed)
    else:
        logger.info("All configuration parts are assigned "
                    "to schedulers and their satellites :)")

    # Schedulers without a configuration in a dispatch ok do not need a configuration
    # so they do not raise dispatching errors if they are not used
    for scheduler_link in self.schedulers:
        if not scheduler_link.cfg_to_manage:
            # "so it do not ask anymore for conf"
            logger.warning('The scheduler %s do not need a configuration!', scheduler_link.name)
            scheduler_link.need_conf = False
def dispatch(self, test=False): # pylint: disable=too-many-branches
"""
Send configuration to satellites
:return: None
"""
if not self.new_to_dispatch:
raise DispatcherError("Dispatcher cannot dispatch, "
"because no configuration is prepared!")
if self.first_dispatch_done:
raise DispatcherError("Dispatcher cannot dispatch, "
"because the configuration is still dispatched!")
if self.dispatch_ok:
logger.info("Dispatching is already done and ok...")
return
logger.info("Trying to send configuration to the satellites...")
self.dispatch_ok = True
# todo: the 3 loops hereunder may be factorized
for link in self.arbiters:
# If not me and a spare arbiter...
if link == self.arbiter_link:
# I exclude myself from the dispatching, I have my configuration ;)
continue
if not link.active:
# I exclude the daemons that are not active
continue
if not link.spare:
# Do not dispatch to a master arbiter!
continue
if link.configuration_sent:
logger.debug("Arbiter %s already sent!", link.name)
continue
if not link.reachable:
logger.debug("Arbiter %s is not reachable to receive its configuration",
link.name)
continue
logger.info("Sending configuration to the arbiter %s", link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
# Now that the spare arbiter has a configuration, tell him it must not run,
# because I'm not dead ;)
link.do_not_run()
for link in self.schedulers:
if link.configuration_sent:
logger.debug("Scheduler %s already sent!", link.name)
continue
if not link.active:
# I exclude the daemons that are not active
continue
if not link.reachable:
logger.debug("Scheduler %s is not reachable to receive its configuration",
link.name)
continue
logger.info("Sending configuration to the scheduler %s", link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
for link in self.satellites:
if link.configuration_sent:
logger.debug("%s %s already sent!", link.type, link.name)
continue
if not link.active:
# I exclude the daemons that are not active
continue
if not link.reachable:
logger.warning("%s %s is not reachable to receive its configuration",
link.type, link.name)
continue
logger.info("Sending configuration to the %s %s", link.type, link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
if self.dispatch_ok:
# Newly prepared configuration got dispatched correctly
self.new_to_dispatch = False
self.first_dispatch_done = True
def stop_request(self, stop_now=False):
"""Send a stop request to all the daemons
:param stop_now: stop now or go to stop wait mode
:type stop_now: bool
:return: True if all daemons are reachable
"""
all_ok = True
for daemon_link in self.all_daemons_links:
logger.debug("Stopping: %s (%s)", daemon_link, stop_now)
if daemon_link == self.arbiter_link:
# I exclude myself from the process, I know we are going to stop ;)
continue
if not daemon_link.active:
# I exclude the daemons that are not active
continue
# Send a stop request to the daemon
try:
stop_ok = daemon_link.stop_request(stop_now=stop_now)
except LinkError:
stop_ok = True
logger.warning("Daemon stop request failed, %s probably stopped!", daemon_link)
all_ok = all_ok and stop_ok
daemon_link.stopping = True
self.stop_request_sent = all_ok
return self.stop_request_sent
|
# Interface skeleton of the Dispatcher class: every method body is an empty stub (pass).
class Dispatcher(object):
    '''Dispatcher is in charge of sending configuration to other daemon.
    It has to handle spare, realms, poller tags etc.
    '''

    def __init__(self, conf, arbiter_link):
        '''Initialize the dispatcher
        Note that the arbiter param is an ArbiterLink, not an Arbiter daemon. Thus it is only
        an interface to the running Arbiter daemon...
        :param conf: the whole Alignak configuration as parsed by the Arbiter
        :type conf: Config
        :param arbiter_link: the link to the arbiter that parsed this configuration
        :type arbiter_link: ArbiterLink
        '''
        pass

    def check_reachable(self, forced=False, test=False):
        '''Check all daemons state (reachable or not)
        If test parameter is True, do not really send but simulate only for testing purpose...
        The update_infos function returns None when no ping has been executed
        (too early...), or True / False according to the real ping and get managed
        configuration result. So, if the result is None, consider as not valid,
        else compute the global result...
        :return: True if all daemons are reachable
        '''
        pass

    def check_status_and_get_events(self):
        '''Get all the daemons status
        :return: Dictionary with all the daemons returned information
        :rtype: dict
        '''
        pass

    def check_dispatch(self):
        '''Check that all active satellites have a configuration dispatched
        A DispatcherError exception is raised if no configuration is dispatched!
        :return: None
        '''
        pass

    def get_satellites_list(self, sat_type):
        '''Get a sorted satellite list: master then spare
        :param sat_type: type of the required satellites (arbiters, schedulers, ...)
        :type sat_type: str
        :return: sorted satellites list
        :rtype: list[alignak.objects.satellitelink.SatelliteLink]
        '''
        pass

    def get_scheduler_ordered_list(self, realm):
        '''Get sorted scheduler list for a specific realm
        List is ordered as: alive first, then spare (if any), then dead scheduler links
        :param realm: realm we want scheduler from
        :type realm: alignak.objects.realm.Realm
        :return: sorted scheduler list
        :rtype: list[alignak.objects.schedulerlink.SchedulerLink]
        '''
        pass

    def prepare_dispatch(self):
        '''
        Prepare dispatch, so prepare for each daemon (schedulers, brokers, receivers, reactionners,
        pollers)
        This function will only prepare something if self.new_to_dispatch is False
        It will reset the first_dispatch_done flag
        A DispatcherError exception is raised if a configuration is already prepared! Unset the
        new_to_dispatch flag before calling!
        :return: None
        '''
        pass

    def dispatch(self, test=False):
        '''
        Send configuration to satellites
        :return: None
        '''
        pass

    def stop_request(self, stop_now=False):
        '''Send a stop request to all the daemons
        :param stop_now: stop now or go to stop wait mode
        :type stop_now: bool
        :return: True if all daemons are reachable
        '''
        pass
| 10 | 10 | 96 | 16 | 58 | 23 | 13 | 0.4 | 1 | 6 | 2 | 0 | 9 | 16 | 9 | 9 | 882 | 155 | 524 | 78 | 514 | 208 | 422 | 78 | 412 | 34 | 1 | 5 | 116 |
4,017 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.ListProp
|
class ListProp(Property):
    """List property"""

    def pythonize(self, val):
        """Convert value into a list::

        * split value (or each element if value is a list) on coma char
        * strip split values

        :param val: value to convert
        :type val: str
        :return: list corresponding to value
        :rtype: list
        """
        # The two former branches only differed by the split function they
        # used (list_split for a list input, to_split otherwise), so select
        # it once and keep a single comprehension.
        splitter = list_split if isinstance(val, list) else to_split
        return [s.strip() if hasattr(s, "strip") else s
                for s in splitter(val, self.split_on_comma)
                if hasattr(s, "strip") and s.strip() != '' or self.keep_empty]
|
# Interface skeleton of the ListProp class: the method body is an empty stub (pass).
class ListProp(Property):
    '''List property'''

    def pythonize(self, val):
        '''Convert value into a list::
        * split value (or each element if value is a list) on coma char
        * strip split values
        :param val: value to convert
        :type val: str
        :return: list corresponding to value
        :rtype: list
        '''
        pass
| 2 | 2 | 19 | 3 | 8 | 8 | 4 | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 4 | 22 | 4 | 9 | 2 | 7 | 9 | 5 | 2 | 3 | 4 | 2 | 1 | 4 |
4,018 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestConfig
|
class TestConfig(PropertiesTester, AlignakTest):
    """Check the properties of the Config class.

    - unused_props: legacy (Nagios-era) properties that are declared but unused
    - without_default: properties that have no default value (none here)
    - properties: the expected default value for each Config property
      (presumably asserted by the PropertiesTester base class — see that class)
    """

    unused_props = [
        'log_file', 'object_cache_file', 'precached_object_file', 'resource_file',
        'temp_file', 'temp_path', 'status_file', 'status_update_interval',
        'command_check_interval', 'external_command_buffer_slots',
        'check_for_updates', 'bare_update_checks',
        'use_retained_program_state',
        'use_retained_scheduling_info',
        'retained_host_attribute_mask',
        'retained_service_attribute_mask',
        'retained_process_host_attribute_mask',
        'retained_process_service_attribute_mask',
        'retained_contact_host_attribute_mask',
        'retained_contact_service_attribute_mask', 'sleep_time',
        'service_inter_check_delay_method',
        'service_interleave_factor', 'max_concurrent_checks',
        'check_result_reaper_frequency',
        'max_check_result_reaper_time', 'check_result_path',
        'max_check_result_file_age', 'host_inter_check_delay_method',
        'free_child_process_memory', 'child_processes_fork_twice',
        'admin_email', 'admin_pager', 'event_broker_options',
        'translate_passive_host_checks', 'passive_host_checks_are_soft'
    ]

    without_default = []

    properties = dict([
        ('clean_objects', False),
        ('forced_realms_hostgroups', True),
        ('program_start', 0),
        ('last_alive', 0),
        ('last_log_rotation', 0),
        ('last_command_check', 0),
        ('pid', 0),
        ('is_running', True),
        ('modified_host_attributes', 0),
        ('modified_service_attributes', 0),
        ('retain_state_information', True),
        # ('passive_host_checks_enabled', True),
        # ('passive_service_checks_enabled', True),
        # ('active_host_checks_enabled', True),
        # ('active_service_checks_enabled', True),
        # ('event_handlers_enabled', True),
        # ('flap_detection_enabled', True),
        # ('notifications_enabled', True),
        ('daemon_mode', True),
        # ('instance_name', ''),
        ('instance_id', ''),
        ('config_name', 'Main configuration'),
        ('alignak_name', ''),
        ('config_base_dir', ''),
        # ('triggers_dir', ''),
        ('packs_dir', ''),
        # ('resource_file', '/tmp/resources.txt'),
        ('enable_notifications', True),
        ('execute_service_checks', True),
        ('accept_passive_service_checks', True),
        ('execute_host_checks', True),
        ('accept_passive_host_checks', True),
        ('enable_event_handlers', True),
        ('log_rotation_method', 'd'),
        ('log_archive_path', '/usr/local/alignak/var/log/archives'),
        ('check_external_commands', True),
        ('main_config_file', ''),
        ('config_files', []),
        ('command_file', ''),
        ('state_retention_file', ''),
        ('state_retention_dir', ''),
        ('retention_update_interval', 0),
        ('use_syslog', False),
        ('log_filename', 'alignak-events.log'),
        ('log_level', ''),
        ('log_rotation_when', 'midnight'),
        ('log_rotation_interval', 1),
        ('log_rotation_count', 365),
        ('log_format', '[%(my_date)s] %(levelname)s: %(message)s'),
        ('log_date', '%Y-%m-%d %H:%M:%S'),
        ('log_notifications', True),
        ('log_alerts', True),
        ('log_acknowledgements', True),
        ('log_downtimes', True),
        ('log_snapshots', True),
        ('log_flappings', True),
        ('log_event_handlers', True),
        ('log_initial_states', False),
        ('log_external_commands', True),
        ('log_passive_checks', False),
        ('log_active_checks', False),
        ('log_alignak_checks', False),
        ('global_host_event_handler', ''),
        ('global_service_event_handler', ''),
        ('max_service_check_spread', 5),
        ('max_host_check_spread', 5),
        ('interval_length', 60),
        ('auto_reschedule_checks', True),
        ('auto_rescheduling_interval', 1),
        ('auto_rescheduling_window', 180),
        ('enable_predictive_host_dependency_checks', True),
        ('enable_predictive_service_dependency_checks', True),
        ('cached_host_check_horizon', 0),
        ('cached_service_check_horizon', 0),
        ('use_large_installation_tweaks', '0'),
        ('enable_environment_macros', False),
        ('enable_flap_detection', True),
        ('low_service_flap_threshold', 20),
        ('high_service_flap_threshold', 30),
        ('low_host_flap_threshold', 20),
        ('high_host_flap_threshold', 30),
        ('soft_state_dependencies', False),
        ('service_check_timeout', 60),
        ('host_check_timeout', 30),
        ('event_handler_timeout', 30),
        ('notification_timeout', 30),
        ('perfdata_timeout', 5),
        ('process_performance_data', True),
        ('host_perfdata_command', ''),
        ('service_perfdata_command', ''),
        ('host_perfdata_file', ''),
        ('service_perfdata_file', ''),
        ('host_perfdata_file_template', '/tmp/host.perf'),
        ('service_perfdata_file_template', '/tmp/host.perf'),
        ('host_perfdata_file_mode', 'a'),
        ('service_perfdata_file_mode', 'a'),
        ('host_perfdata_file_processing_interval', 15),
        ('service_perfdata_file_processing_interval', 15),
        ('host_perfdata_file_processing_command', None),
        ('service_perfdata_file_processing_command', None),
        ('check_for_orphaned_services', True),
        ('check_for_orphaned_hosts', True),
        ('check_service_freshness', True),
        ('service_freshness_check_interval', 60),
        ('check_host_freshness', True),
        ('host_freshness_check_interval', 60),
        ('additional_freshness_latency', 15),
        ('enable_embedded_perl', True),
        ('use_embedded_perl_implicitly', False),
        ('date_format', None),
        ('use_timezone', ''),
        ('illegal_object_name_chars', '`~!$%^&*"|\'<>?,()='),
        ('env_variables_prefix', 'ALIGNAK_'),
        ('illegal_macro_output_chars', ''),
        ('use_regexp_matching', False),
        ('use_true_regexp_matching', None),
        ('broker_module', ''),
        ('modified_attributes', 0),

        # Alignak specific
        ('flap_history', 20),
        ('max_plugins_output_length', 8192),
        ('no_event_handlers_during_downtimes', True),
        ('cleaning_queues_interval', 900),
        ('enable_problem_impacts_states_change', True),
        ('resource_macros_names', []),

        ('accept_passive_unknown_check_results', True),

        # Discovery part
        ('runners_timeout', 3600),
        # ('pack_distribution_file', 'pack_distribution.dat'),

        ('daemon_thread_pool_size', 8),
        ('timeout_exit_status', 2),

        # daemons part
        ('launch_missing_daemons', False),
        ('daemons_arguments', ''),
        ('daemons_initial_port', 10000),
        ('daemons_log_folder', '/usr/local/var/log/alignak'),
        ('daemons_check_period', 5),
        ('daemons_start_timeout', 1),
        ('daemons_dispatch_timeout', 5),
        ('daemons_new_conf_timeout', 1),
        ('daemons_stop_timeout', 5),
        ('daemons_failure_kill', True),

        ('alignak_env', []),

        ('events_date_format', '%Y-%m-%d %H:%M:%S'),
        ('events_log_count', 100),
    ])

    def setUp(self):
        super(TestConfig, self).setUp()

        # Imported here to avoid loading the whole configuration machinery
        # at module import time
        from alignak.objects.config import Config
        self.item = Config()
|
# Interface skeleton of the TestConfig class: the method body is an empty stub (pass).
class TestConfig(PropertiesTester, AlignakTest):
    def setUp(self):
        pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0.09 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 188 | 13 | 161 | 7 | 158 | 14 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,019 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.IntegerProp
|
class IntegerProp(Property):
    """Integer property"""

    def pythonize(self, val):
        """Turn a raw configuration value into an integer.

        When the value is a list, only one element (the last) is kept,
        then the remaining value is converted to an integer.

        :param val: value to convert
        :type val:
        :return: integer corresponding to value
        :rtype: int
        """
        single = unique_value(val)
        return to_int(single)
|
# Interface skeleton of the IntegerProp class: the method body is an empty stub (pass).
class IntegerProp(Property):
    '''Integer property'''

    def pythonize(self, val):
        '''Convert value into an integer::
        * If value is a list, try to take the last element
        * Then call float(int(val))
        :param val: value to convert
        :type val:
        :return: integer corresponding to value
        :rtype: int
        '''
        pass
| 2 | 2 | 12 | 2 | 2 | 8 | 1 | 3 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 4 | 15 | 3 | 3 | 2 | 1 | 9 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
4,020 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/timeperiod.py
|
alignak.objects.timeperiod.Timeperiod
|
class Timeperiod(Item):
    """
    Class to manage a timeperiod
    A timeperiod is defined with range time (hours) of week to do action
    and add day exceptions (like non working days)
    """
    my_type = 'timeperiod'
    my_name_property = "%s_name" % my_type

    properties = Item.properties.copy()
    properties.update({
        'timeperiod_name':
            StringProp(fill_brok=[FULL_STATUS]),
        'alias':
            StringProp(default=u'', fill_brok=[FULL_STATUS]),
        'use':
            ListProp(default=[]),
        'register':
            IntegerProp(default=1),

        # These are needed if a broker module calls methods on timeperiod objects
        'dateranges':
            ListProp(default=[], fill_brok=[FULL_STATUS]),
        'exclude':
            ListProp(default=[], fill_brok=[FULL_STATUS]),
        'unresolved':
            ListProp(default=[], fill_brok=[FULL_STATUS]),
        'invalid_entries':
            ListProp(default=[], fill_brok=[FULL_STATUS]),
        'is_active':
            BoolProp(default=False),
        'activated_once':
            BoolProp(default=False),
    })
    running_properties = Item.running_properties.copy()

    def __init__(self, params, parsing=True):
        """Create a Timeperiod from a parameters dictionary.

        Declared properties (timeperiod_name, alias, ...) are handled by the
        Item parent class; every other parameter (monday, tuesday, exception
        days, ...) is kept as a raw 'key value' string in self.unresolved
        until explode() resolves it into Daterange objects.

        :param params: object attributes
        :type params: dict
        :param parsing: True when parsing a configuration, False when
                        un-serializing a received object
        :type parsing: bool
        """
        # Get standard params
        standard_params = dict(
            [(k, v) for k, v in list(params.items()) if k in self.__class__.properties])
        # Get timeperiod params (monday, tuesday, ...)
        timeperiod_params = dict([(k, v) for k, v in list(params.items())
                                  if k not in self.__class__.properties])
        if 'dateranges' in standard_params and isinstance(standard_params['dateranges'], list) \
                and standard_params['dateranges'] \
                and isinstance(standard_params['dateranges'][0], dict):
            # Un-serializing: rebuild each Daterange object from its
            # serialized {'__sys_python_module__': ..., 'content': ...} form
            new_list = []
            for elem in standard_params['dateranges']:
                cls = get_alignak_class(elem['__sys_python_module__'])
                if cls:
                    new_list.append(cls(elem['content']))
            # We recreate the object
            self.dateranges = new_list
            # And remove prop, to prevent from being overridden
            del standard_params['dateranges']

        # Handle standard params
        super(Timeperiod, self).__init__(params=standard_params, parsing=parsing)

        self.cache = {}  # For tuning purpose only
        self.invalid_cache = {}  # same but for invalid search

        # We use the uuid presence to assume we are un-serializing
        if not parsing:
            self.uuid = params['uuid']
        else:
            # Initial creation here, uuid already created in super
            self.unresolved = []
            self.dateranges = []
            self.exclude = []
            self.invalid_entries = []
            self.is_active = False
            self.activated_once = False

            # Handle timeperiod params
            # e.g. 'monday': '00:00-24:00' is stored as the raw string
            # 'monday 00:00-24:00' until explode() is called
            for key, value in list(timeperiod_params.items()):
                if isinstance(value, list):
                    if value:
                        value = value[-1]
                    else:
                        value = ''
                self.unresolved.append(key + ' ' + value)

    def __str__(self):  # pragma: no cover
        """
        Get readable object

        :return: this object in readable format
        :rtype: str
        """
        string = '<Timeperiod%s %s, uuid=%s, ' \
                 % (' template' if self.is_a_template() else '', self.get_name(), self.uuid)
        # string = str(self.__dict__) + '\n'
        if getattr(self, 'unresolved'):
            string += ', unresolved: ' + ', '.join([elt for elt in self.unresolved])
        if getattr(self, 'dateranges'):
            string += ', date ranges: '
            for elt in self.dateranges:
                (start, end) = elt.get_start_and_end_time()
                start = time.asctime(time.localtime(start))
                end = time.asctime(time.localtime(end))
                string += str(elt) + " (" + str((start, end)) + ")"
        if getattr(self, 'exclude'):
            string += ', exclude: ' + ', '.join([str(elt) for elt in self.exclude])
        string += ' />'
        return string

    def serialize(self, no_json=True, printing=False):
        """This function serialize into a simple dict object.

        It is used when transferring data to other daemons over the network (http)

        Here we directly return all attributes

        :return: json representation of a Timeperiod
        :rtype: dict
        """
        return super(Timeperiod, self).serialize(no_json=no_json, printing=printing)

    def is_time_valid(self, timestamp):
        """
        Check if a time is valid or not

        A time is valid when it is in no excluded time period and in at
        least one of the dateranges.

        :return: time is valid or not
        :rtype: bool
        """
        if hasattr(self, 'exclude'):
            for daterange in self.exclude:
                if daterange.is_time_valid(timestamp):
                    return False
        for daterange in self.dateranges:
            if daterange.is_time_valid(timestamp):
                return True
        return False

    def get_min_from_t(self, timestamp):
        """
        Get the first time > timestamp which is valid

        :param timestamp: number of seconds
        :type timestamp: int
        :return: number of seconds
        :rtype: int

        TODO: not used, so delete it
        """
        # NOTE(review): min() raises ValueError when self.dateranges is
        # empty — confirm callers guarantee at least one daterange
        mins_incl = []
        for daterange in self.dateranges:
            mins_incl.append(daterange.get_min_from_t(timestamp))
        return min(mins_incl)

    def get_not_in_min_from_t(self, first):
        """
        Get the first time > timestamp which is not valid

        :return: None
        TODO: not used, so delete it
        """
        pass

    def find_next_valid_time_from_cache(self, timestamp):
        """
        Get the next valid time from cache

        :param timestamp: number of seconds
        :type timestamp: int
        :return: Nothing or time in seconds
        :rtype: None or int
        """
        try:
            return self.cache[timestamp]
        except KeyError:
            return None

    def find_next_invalid_time_from_cache(self, timestamp):
        """
        Get the next invalid time from cache

        :param timestamp: number of seconds
        :type timestamp: int
        :return: Nothing or time in seconds
        :rtype: None or int
        """
        try:
            return self.invalid_cache[timestamp]
        except KeyError:
            return None

    def check_and_log_activation_change(self):
        """
        Will look for active/un-active change of timeperiod.
        In case it change, we log it like:
        [1327392000] TIMEPERIOD TRANSITION: <name>;<from>;<to>

        States of is_active:
        -1: default value when start
        0: when timeperiod end
        1: when timeperiod start

        :return: None or a brok if TP changed
        """
        now = int(time.time())

        was_active = self.is_active
        self.is_active = self.is_time_valid(now)

        # If we got a change, log it!
        if self.is_active != was_active:
            _from = 0
            _to = 0
            # If it's the start, get a special value for was
            if not self.activated_once:
                _from = -1
                self.activated_once = True
            if was_active:
                _from = 1
            if self.is_active:
                _to = 1

            # Now raise the log
            brok = make_monitoring_log(
                'info', 'TIMEPERIOD TRANSITION: %s;%d;%d' % (self.get_name(), _from, _to)
            )
            return brok
        return None

    def clean_cache(self):
        """
        Clean cache with entries older than now because not used in future ;)

        :return: None
        """
        now = int(time.time())
        t_to_del = []
        for timestamp in self.cache:
            if timestamp < now:
                t_to_del.append(timestamp)
        for timestamp in t_to_del:
            del self.cache[timestamp]

        # same for the invalid cache
        t_to_del = []
        for timestamp in self.invalid_cache:
            if timestamp < now:
                t_to_del.append(timestamp)
        for timestamp in t_to_del:
            del self.invalid_cache[timestamp]

    def get_next_valid_time_from_t(self, timestamp):
        # pylint: disable=too-many-branches
        """
        Get next valid time. If it's in cache, get it, otherwise define it.
        The limit to find it is 1 year.

        :param timestamp: number of seconds
        :type timestamp: int or float
        :return: Nothing or time in seconds
        :rtype: None or int
        """
        timestamp = int(timestamp)
        original_t = timestamp

        res_from_cache = self.find_next_valid_time_from_cache(timestamp)
        if res_from_cache is not None:
            return res_from_cache

        still_loop = True

        # Loop for all minutes...
        while still_loop:
            local_min = None

            # Ok, not in cache...
            dr_mins = []
            for daterange in self.dateranges:
                dr_mins.append(daterange.get_next_valid_time_from_t(timestamp))

            s_dr_mins = sorted([d for d in dr_mins if d is not None])

            for t01 in s_dr_mins:
                if not self.exclude and still_loop:
                    # No Exclude so we are good
                    local_min = t01
                    still_loop = False
                else:
                    for timeperiod in self.exclude:
                        if not timeperiod.is_time_valid(t01) and still_loop:
                            # OK we found a date that is not valid in any exclude timeperiod
                            local_min = t01
                            still_loop = False

            if local_min is None:
                # Looking for next invalid date
                exc_mins = []
                if s_dr_mins != []:
                    for timeperiod in self.exclude:
                        exc_mins.append(timeperiod.get_next_invalid_time_from_t(s_dr_mins[0]))

                s_exc_mins = sorted([d for d in exc_mins if d is not None])

                if s_exc_mins != []:
                    local_min = s_exc_mins[0]

            if local_min is None:
                still_loop = False
            else:
                timestamp = local_min
                # No loop more than one year
                if timestamp > original_t + 3600 * 24 * 366 + 1:
                    still_loop = False
                    local_min = None

        # Ok, we update the cache...
        self.cache[original_t] = local_min
        return local_min

    def get_next_invalid_time_from_t(self, timestamp):
        # pylint: disable=too-many-branches
        """
        Get the next invalid time

        Builds the list of (start, end) valid periods within one year from
        the timestamp, merges them, then subtracts the merged exclude
        periods to find the first instant outside any valid period.

        :param timestamp: timestamp in seconds (of course)
        :type timestamp: int or float
        :return: timestamp of next invalid time
        :rtype: int or float
        """
        timestamp = int(timestamp)
        original_t = timestamp

        dr_mins = []
        for daterange in self.dateranges:
            timestamp = original_t
            cont = True
            while cont:
                start = daterange.get_next_valid_time_from_t(timestamp)
                if start is not None:
                    end = daterange.get_next_invalid_time_from_t(start)
                    dr_mins.append((start, end))
                    timestamp = end
                else:
                    cont = False
                if timestamp > original_t + (3600 * 24 * 365):
                    cont = False
        periods = merge_periods(dr_mins)

        # manage exclude periods
        dr_mins = []
        for exclude in self.exclude:
            for daterange in exclude.dateranges:
                timestamp = original_t
                cont = True
                while cont:
                    start = daterange.get_next_valid_time_from_t(timestamp)
                    if start is not None:
                        end = daterange.get_next_invalid_time_from_t(start)
                        dr_mins.append((start, end))
                        timestamp = end
                    else:
                        cont = False
                    if timestamp > original_t + (3600 * 24 * 365):
                        cont = False
        if not dr_mins:
            periods_exclude = []
        else:
            periods_exclude = merge_periods(dr_mins)

        if len(periods) >= 1:
            # if first valid period is after original timestamp, the first invalid time
            # is the original timestamp
            if periods[0][0] > original_t:
                return original_t
            # check the first period + first period of exclude
            if len(periods_exclude) >= 1:
                if periods_exclude[0][0] < periods[0][1]:
                    return periods_exclude[0][0]
            return periods[0][1]
        return original_t

    def is_correct(self):
        """Check if this object configuration is correct ::

        * Check if dateranges of timeperiod are valid
        * Call our parent class is_correct checker

        :return: True if the configuration is correct, otherwise False if at least one daterange
        is not correct
        :rtype: bool
        """
        state = True
        for daterange in self.dateranges:
            good = daterange.is_correct()
            if not good:
                self.add_error("[timeperiod::%s] invalid daterange '%s'"
                               % (self.get_name(), daterange))
            state &= good

        # Warn about non correct entries
        for entry in self.invalid_entries:
            self.add_error("[timeperiod::%s] invalid entry '%s'" % (self.get_name(), entry))
        return super(Timeperiod, self).is_correct() and state

    def resolve_daterange(self, dateranges, entry):
        # pylint: disable=too-many-return-statements,too-many-statements,
        # pylint: disable=too-many-branches,too-many-locals
        """
        Try to solve dateranges (special cases)

        The entry is matched against the daterange syntaxes from the most
        specific to the least specific one; the first matching pattern wins
        and appends the corresponding Daterange object. Unmatched entries
        are logged and stored in self.invalid_entries. In every pattern,
        the trailing captured group ('other') is the time ranges part
        (e.g. '00:00-24:00').

        :param dateranges: dateranges
        :type dateranges: list
        :param entry: property of timeperiod
        :type entry: string
        :return: None
        """
        # e.g. '2007-01-01 - 2008-02-01 / 3  00:00-24:00'
        res = re.search(
            r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry
        )
        if res is not None:
            (syear, smon, smday, eyear, emon, emday, skip_interval, other) = res.groups()
            data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
                    'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
                    'ewday': 0, 'ewday_offset': 0, 'skip_interval': skip_interval,
                    'other': other}
            dateranges.append(CalendarDaterange(data))
            return

        # e.g. '2007-01-01 / 5  00:00-24:00' (single day, with skip interval)
        res = re.search(r'(\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (syear, smon, smday, skip_interval, other) = res.groups()
            eyear = syear
            emon = smon
            emday = smday
            data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
                    'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
                    'ewday': 0, 'ewday_offset': 0, 'skip_interval': skip_interval,
                    'other': other}
            dateranges.append(CalendarDaterange(data))
            return

        # e.g. '2007-01-01 - 2008-02-01  00:00-24:00'
        res = re.search(
            r'(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry
        )
        if res is not None:
            (syear, smon, smday, eyear, emon, emday, other) = res.groups()
            data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
                    'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
                    'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                    'other': other}
            dateranges.append(CalendarDaterange(data))
            return

        # e.g. '2007-01-01  00:00-24:00' (single calendar day)
        res = re.search(r'(\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (syear, smon, smday, other) = res.groups()
            eyear = syear
            emon = smon
            emday = smday
            data = {'syear': syear, 'smon': smon, 'smday': smday, 'swday': 0,
                    'swday_offset': 0, 'eyear': eyear, 'emon': emon, 'emday': emday,
                    'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                    'other': other}
            dateranges.append(CalendarDaterange(data))
            return

        # e.g. 'monday 3 january - thursday 4 february / 2  00:00-24:00'
        res = re.search(
            r'([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) / (\d+)[\s\t]*([0-9:, -]+)',
            entry
        )
        if res is not None:
            (swday, swday_offset, smon, ewday,
             ewday_offset, emon, skip_interval, other) = res.groups()
            smon_id = Daterange.get_month_id(smon)
            emon_id = Daterange.get_month_id(emon)
            swday_id = Daterange.get_weekday_id(swday)
            ewday_id = Daterange.get_weekday_id(ewday)
            data = {'syear': 0, 'smon': smon_id, 'smday': 0, 'swday': swday_id,
                    'swday_offset': swday_offset, 'eyear': 0, 'emon': emon_id, 'emday': 0,
                    'ewday': ewday_id, 'ewday_offset': ewday_offset, 'skip_interval': skip_interval,
                    'other': other}
            dateranges.append(MonthWeekDayDaterange(data))
            return

        # e.g. 'monday 2 - thursday 3 / 2', 'february 1 - march 15 / 3'
        # or 'day 2 - day 6 / 3' (all with skip interval)
        res = re.search(r'([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t00, smday, t01, emday, skip_interval, other) = res.groups()
            if t00 in Daterange.weekdays and t01 in Daterange.weekdays:
                swday = Daterange.get_weekday_id(t00)
                ewday = Daterange.get_weekday_id(t01)
                swday_offset = smday
                ewday_offset = emday
                data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
                        'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
                        'ewday': ewday, 'ewday_offset': ewday_offset,
                        'skip_interval': skip_interval, 'other': other}
                dateranges.append(WeekDayDaterange(data))
                return

            if t00 in Daterange.months and t01 in Daterange.months:
                smon = Daterange.get_month_id(t00)
                emon = Daterange.get_month_id(t01)
                data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0, 'swday_offset': 0,
                        'eyear': 0, 'emon': emon, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
                        'skip_interval': skip_interval, 'other': other}
                dateranges.append(MonthDateDaterange(data))
                return

            if t00 == 'day' and t01 == 'day':
                data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0, 'swday_offset': 0,
                        'eyear': 0, 'emon': 0, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
                        'skip_interval': skip_interval, 'other': other}
                dateranges.append(MonthDayDaterange(data))
                return

        # e.g. 'monday 3 - 5 / 2', 'march 4 - 5 / 2' or 'day 1 - 15 / 3'
        res = re.search(r'([a-z]*) ([\d-]+) - ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t00, smday, emday, skip_interval, other) = res.groups()
            if t00 in Daterange.weekdays:
                swday = Daterange.get_weekday_id(t00)
                swday_offset = smday
                ewday = swday
                ewday_offset = emday
                data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
                        'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
                        'ewday': ewday, 'ewday_offset': ewday_offset,
                        'skip_interval': skip_interval, 'other': other}
                dateranges.append(WeekDayDaterange(data))
                return

            if t00 in Daterange.months:
                smon = Daterange.get_month_id(t00)
                emon = smon
                data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0, 'swday_offset': 0,
                        'eyear': 0, 'emon': emon, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
                        'skip_interval': skip_interval, 'other': other}
                dateranges.append(MonthDateDaterange(data))
                return

            if t00 == 'day':
                data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0, 'swday_offset': 0,
                        'eyear': 0, 'emon': 0, 'emday': emday, 'ewday': 0, 'ewday_offset': 0,
                        'skip_interval': skip_interval, 'other': other}
                dateranges.append(MonthDayDaterange(data))
                return

        # e.g. 'monday 3 january - thursday 4 february  00:00-24:00'
        res = re.search(
            r'([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) [\s\t]*([0-9:, -]+)', entry
        )
        if res is not None:
            (swday, swday_offset, smon, ewday, ewday_offset, emon, other) = res.groups()
            smon_id = Daterange.get_month_id(smon)
            emon_id = Daterange.get_month_id(emon)
            swday_id = Daterange.get_weekday_id(swday)
            ewday_id = Daterange.get_weekday_id(ewday)
            data = {'syear': 0, 'smon': smon_id, 'smday': 0, 'swday': swday_id,
                    'swday_offset': swday_offset, 'eyear': 0, 'emon': emon_id, 'emday': 0,
                    'ewday': ewday_id, 'ewday_offset': ewday_offset, 'skip_interval': 0,
                    'other': other}
            dateranges.append(MonthWeekDayDaterange(data))
            return

        # e.g. 'monday 3 - 5', 'april 10 - 16' or 'day 1 - 15'
        res = re.search(r'([a-z]*) ([\d-]+) - ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t00, smday, emday, other) = res.groups()
            if t00 in Daterange.weekdays:
                swday = Daterange.get_weekday_id(t00)
                swday_offset = smday
                ewday = swday
                ewday_offset = emday
                data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
                        'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
                        'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
                        'other': other}
                dateranges.append(WeekDayDaterange(data))
                return

            if t00 in Daterange.months:
                smon = Daterange.get_month_id(t00)
                emon = smon
                data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0,
                        'swday_offset': 0, 'eyear': 0, 'emon': emon, 'emday': emday,
                        'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                        'other': other}
                dateranges.append(MonthDateDaterange(data))
                return

            if t00 == 'day':
                data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0,
                        'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': emday,
                        'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                        'other': other}
                dateranges.append(MonthDayDaterange(data))
                return

        # e.g. 'monday 3 - thursday 4', 'february 1 - march 15' or 'day 2 - day 6'
        res = re.search(r'([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t00, smday, t01, emday, other) = res.groups()
            if t00 in Daterange.weekdays and t01 in Daterange.weekdays:
                swday = Daterange.get_weekday_id(t00)
                ewday = Daterange.get_weekday_id(t01)
                swday_offset = smday
                ewday_offset = emday
                data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
                        'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
                        'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
                        'other': other}
                dateranges.append(WeekDayDaterange(data))
                return

            if t00 in Daterange.months and t01 in Daterange.months:
                smon = Daterange.get_month_id(t00)
                emon = Daterange.get_month_id(t01)
                data = {'syear': 0, 'smon': smon, 'smday': smday, 'swday': 0,
                        'swday_offset': 0, 'eyear': 0, 'emon': emon, 'emday': emday,
                        'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                        'other': other}
                dateranges.append(MonthDateDaterange(data))
                return

            if t00 == 'day' and t01 == 'day':
                data = {'syear': 0, 'smon': 0, 'smday': smday, 'swday': 0,
                        'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': emday,
                        'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                        'other': other}
                dateranges.append(MonthDayDaterange(data))
                return

        # e.g. 'thursday 3 february', or with an empty third word:
        # 'monday 2', 'february 3' or 'day 3'
        res = re.search(r'([a-z]*) ([\d-]+) ([a-z]*)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            (t00, t02, t01, other) = res.groups()
            if t00 in Daterange.weekdays and t01 in Daterange.months:
                swday = Daterange.get_weekday_id(t00)
                smon = Daterange.get_month_id(t01)
                emon = smon
                ewday = swday
                ewday_offset = t02
                data = {'syear': 0, 'smon': smon, 'smday': 0, 'swday': swday,
                        'swday_offset': t02, 'eyear': 0, 'emon': emon, 'emday': 0,
                        'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
                        'other': other}
                dateranges.append(MonthWeekDayDaterange(data))
                return
            if not t01:
                if t00 in Daterange.weekdays:
                    swday = Daterange.get_weekday_id(t00)
                    swday_offset = t02
                    ewday = swday
                    ewday_offset = swday_offset
                    data = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': swday,
                            'swday_offset': swday_offset, 'eyear': 0, 'emon': 0, 'emday': 0,
                            'ewday': ewday, 'ewday_offset': ewday_offset, 'skip_interval': 0,
                            'other': other}
                    dateranges.append(WeekDayDaterange(data))
                    return
                if t00 in Daterange.months:
                    smon = Daterange.get_month_id(t00)
                    emon = smon
                    emday = t02
                    data = {'syear': 0, 'smon': smon, 'smday': t02, 'swday': 0,
                            'swday_offset': 0, 'eyear': 0, 'emon': emon, 'emday': emday,
                            'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                            'other': other}
                    dateranges.append(MonthDateDaterange(data))
                    return
                if t00 == 'day':
                    emday = t02
                    data = {'syear': 0, 'smon': 0, 'smday': t02, 'swday': 0,
                            'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': emday,
                            'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0,
                            'other': other}
                    dateranges.append(MonthDayDaterange(data))
                    return

        # e.g. 'saturday 00:00-12:00' (plain weekday entry)
        res = re.search(r'([a-z]*)[\s\t]+([0-9:, -]+)', entry)
        if res is not None:
            (t00, other) = res.groups()
            if t00 in Daterange.weekdays:
                day = t00
                data = {'day': day, 'other': other}
                dateranges.append(StandardDaterange(data))
                return

        logger.info("[timeperiod::%s] no match for %s", self.get_name(), entry)
        self.invalid_entries.append(entry)

    def apply_inheritance(self):
        """
        Do not inherit any property nor custom variables for a timeperiod

        :return: None
        """
        pass

    def explode(self):
        """
        Try to resolve all unresolved elements

        Each raw entry stored by __init__ is turned into a Daterange object
        (or moved to invalid_entries) by resolve_daterange.

        :return: None
        """
        for entry in self.unresolved:
            self.resolve_daterange(self.dateranges, entry)
        self.unresolved = []

    def linkify(self, timeperiods):
        """
        Will make timeperiod in exclude with id of the timeperiods

        Replaces the excluded timeperiod names with the uuid of the
        corresponding Timeperiod objects.

        :param timeperiods: Timeperiods object
        :type timeperiods:
        :return: None
        """
        new_exclude = []
        if hasattr(self, 'exclude') and self.exclude != []:
            logger.debug("[timeperiod::%s] have excluded %s", self.get_name(), self.exclude)
            excluded_tps = self.exclude
            for tp_name in excluded_tps:
                timeperiod = timeperiods.find_by_name(tp_name.strip())
                if timeperiod is not None:
                    new_exclude.append(timeperiod.uuid)
                else:
                    self.add_error("[timeperiod::%s] unknown %s timeperiod"
                                   % (self.get_name(), tp_name))
        self.exclude = new_exclude

    def check_exclude_rec(self):
        # pylint: disable=access-member-before-definition
        """
        Check if this timeperiod is tagged

        Used to detect a loop in the exclude relation graph: a timeperiod
        visited twice (rec_tag already set) means a cycle.

        :return: if tagged return false, if not true
        :rtype: bool
        """
        if self.rec_tag:
            self.add_error("[timeperiod::%s] is in a loop in exclude parameter"
                           % self.get_name())
            return False
        self.rec_tag = True
        for timeperiod in self.exclude:
            timeperiod.check_exclude_rec()
        return True

    def fill_data_brok_from(self, data, brok_type):
        """
        Add timeperiods from brok

        :param data: timeperiod dictionnary
        :type data: dict
        :param brok_type: brok type
        :type brok_type: string
        :return: None
        """
        cls = self.__class__
        # Now config properties
        for prop, entry in list(cls.properties.items()):
            # Is this property intended for broking?
            # if 'fill_brok' in entry:
            if brok_type in entry.fill_brok:
                if hasattr(self, prop):
                    data[prop] = getattr(self, prop)
                elif entry.has_default:
                    data[prop] = entry.default
|
class Timeperiod(Item):
'''
Class to manage a timeperiod
A timeperiod is defined with range time (hours) of week to do action
and add day exceptions (like non working days)
'''
def __init__(self, params, parsing=True):
pass
def __str__(self):
'''
Get readable object
:return: this object in readable format
:rtype: str
'''
pass
def serialize(self, no_json=True, printing=False):
'''This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Timeperiod
:rtype: dict
'''
pass
def is_time_valid(self, timestamp):
'''
Check if a time is valid or not
:return: time is valid or not
:rtype: bool
'''
pass
def get_min_from_t(self, timestamp):
'''
Get the first time > timestamp which is valid
:param timestamp: number of seconds
:type timestamp: int
:return: number of seconds
:rtype: int
TODO: not used, so delete it
'''
pass
def get_not_in_min_from_t(self, first):
'''
Get the first time > timestamp which is not valid
:return: None
TODO: not used, so delete it
'''
pass
def find_next_valid_time_from_cache(self, timestamp):
'''
Get the next valid time from cache
:param timestamp: number of seconds
:type timestamp: int
:return: Nothing or time in seconds
:rtype: None or int
'''
pass
def find_next_invalid_time_from_cache(self, timestamp):
'''
Get the next invalid time from cache
:param timestamp: number of seconds
:type timestamp: int
:return: Nothing or time in seconds
:rtype: None or int
'''
pass
def check_and_log_activation_change(self):
'''
Will look for active/un-active change of timeperiod.
In case it change, we log it like:
[1327392000] TIMEPERIOD TRANSITION: <name>;<from>;<to>
States of is_active:
-1: default value when start
0: when timeperiod end
1: when timeperiod start
:return: None or a brok if TP changed
'''
pass
def clean_cache(self):
'''
Clean cache with entries older than now because not used in future ;)
:return: None
'''
pass
def get_next_valid_time_from_t(self, timestamp):
'''
Get next valid time. If it's in cache, get it, otherwise define it.
The limit to find it is 1 year.
:param timestamp: number of seconds
:type timestamp: int or float
:return: Nothing or time in seconds
:rtype: None or int
'''
pass
def get_next_invalid_time_from_t(self, timestamp):
'''
Get the next invalid time
:param timestamp: timestamp in seconds (of course)
:type timestamp: int or float
:return: timestamp of next invalid time
:rtype: int or float
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check if dateranges of timeperiod are valid
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False if at least one daterange
is not correct
:rtype: bool
'''
pass
def resolve_daterange(self, dateranges, entry):
'''
Try to solve dateranges (special cases)
:param dateranges: dateranges
:type dateranges: list
:param entry: property of timeperiod
:type entry: string
:return: None
'''
pass
def apply_inheritance(self):
'''
Do not inherit any property nor custom variables for a timeperiod
:return: None
'''
pass
def explode(self):
'''
Try to resolve all unresolved elements
:return: None
'''
pass
def linkify(self, timeperiods):
'''
Will make timeperiod in exclude with id of the timeperiods
:param timeperiods: Timeperiods object
:type timeperiods:
:return: None
'''
pass
def check_exclude_rec(self):
'''
Check if this timeperiod is tagged
:return: if tagged return false, if not true
:rtype: bool
'''
pass
def fill_data_brok_from(self, data, brok_type):
'''
Add timeperiods from brok
:param data: timeperiod dictionnary
:type data: dict
:param brok_type: brok type
:type brok_type: string
:return: None
'''
pass
| 20 | 19 | 37 | 4 | 26 | 8 | 6 | 0.3 | 1 | 13 | 7 | 0 | 19 | 10 | 19 | 53 | 760 | 88 | 519 | 97 | 499 | 156 | 407 | 96 | 387 | 31 | 3 | 5 | 119 |
4,021 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.FloatProp
|
class FloatProp(Property):
    """Property holding a float value"""

    def pythonize(self, val):
        """Cast a raw configuration value to a float.

        A list value is first reduced to its last element, which is then
        converted with ``float()``.

        :param val: value to convert
        :type val:
        :return: float corresponding to value
        :rtype: float
        """
        single_value = unique_value(val)
        return to_float(single_value)
|
class FloatProp(Property):
'''Float property'''
def pythonize(self, val):
'''Convert value into a float::
* If value is a list, try to take the last element
* Then call float(val)
:param val: value to convert
:type val:
:return: float corresponding to value
:rtype: float
'''
pass
| 2 | 2 | 12 | 2 | 2 | 8 | 1 | 3 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 4 | 15 | 3 | 3 | 2 | 1 | 9 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
4,022 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.DictProp
|
class DictProp(Property):
    """Dict property

    Values are parsed from a ``"key1=value1,key2=value2,..."`` string.
    """
    # pylint: disable=keyword-arg-before-vararg
    def __init__(self, elts_prop=None, *args, **kwargs):
        """Dictionary of values.
        If elts_prop is not None, must be a Property subclass
        All dict values will be casted as elts_prop values when pythonized

        elts_prop = Property of dict members
        """
        super(DictProp, self).__init__(*args, **kwargs)

        if elts_prop is not None and not issubclass(elts_prop, Property):
            raise TypeError("DictProp constructor only accept Property"
                            "sub-classes as elts_prop parameter")
        self.elts_prop = None
        if elts_prop is not None:
            self.elts_prop = elts_prop()

    def pythonize(self, val):
        """Convert value into a dict::

        * If value is a list, try to take the last element
        * split "key=value" string and convert to { key:value }

        :param val: value to convert
        :type val:
        :return: log level corresponding to value
        :rtype: str
        """
        val = unique_value(val)

        def split(keyval):
            """Split key-value string into (key,value)

            :param keyval: key value string
            :return: key, value
            :rtype: tuple
            """
            matches = re.match(r"^\s*([^\s]+)\s*=\s*([^\s]+)\s*$", keyval)
            if matches is None:
                raise ValueError

            # A real conditional expression replaces the legacy eager
            # tuple-indexing trick: the old form built both alternatives,
            # which would have called pythonize on a None elts_prop.
            # In practice split() is only reached when elts_prop is set
            # (see the guard below), but keep the None case explicit.
            value = matches.group(2)
            if self.elts_prop is not None:
                value = self.elts_prop.pythonize(value)
            return (matches.group(1), value)

        if val is None:
            return dict()

        if self.elts_prop is None:
            return val

        # val is in the form "key1=addr:[port],key2=addr:[port],..."
        return dict([split(kv) for kv in to_split(val)])
|
class DictProp(Property):
'''Dict property
'''
def __init__(self, elts_prop=None, *args, **kwargs):
'''Dictionary of values.
If elts_prop is not None, must be a Property subclass
All dict values will be casted as elts_prop values when pythonized
elts_prop = Property of dict members
'''
pass
def pythonize(self, val):
'''Convert value into a dict::
* If value is a list, try to take the last element
* split "key=value" string and convert to { key:value }
:param val: value to convert
:type val:
:return: log level corresponding to value
:rtype: str
'''
pass
def split(keyval):
'''Split key-value string into (key,value)
:param keyval: key value string
:return: key, value
:rtype: tuple
'''
pass
| 4 | 4 | 25 | 4 | 11 | 9 | 3 | 0.96 | 1 | 4 | 0 | 0 | 2 | 1 | 2 | 5 | 62 | 13 | 25 | 6 | 21 | 24 | 20 | 6 | 16 | 3 | 2 | 1 | 8 |
4,023 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.ConfigPathProp
|
class ConfigPathProp(StringProp):
    """String property holding a configuration file path"""
|
class ConfigPathProp(StringProp):
''' A string property representing a config file path '''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 3 | 0 | 0 |
4,024 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.CharProp
|
class CharProp(Property):
    """Property holding a single character"""

    def pythonize(self, val):
        """Cast a raw configuration value to a one-character string.

        A list value is first reduced to its last element, whose first
        character is kept.

        :param val: value to convert
        :type val:
        :return: char corresponding to value
        :rtype: str
        """
        single_value = unique_value(val)
        return to_char(single_value)
|
class CharProp(Property):
'''One character string property'''
def pythonize(self, val):
'''Convert value into a char ::
* If value is a list try, to take the last element
* Then take the first char of val (first elem)
:param val: value to convert
:type val:
:return: char corresponding to value
:rtype: str
'''
pass
| 2 | 2 | 12 | 2 | 2 | 8 | 1 | 3 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 4 | 15 | 3 | 3 | 2 | 1 | 9 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
4,025 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.BoolProp
|
class BoolProp(Property):
    """A Boolean Property.

    Boolean values are currently case insensitively defined as 0,
    false, no, off for False, and 1, true, yes, on for True).
    """

    def pythonize(self, val):
        """Convert value into a boolean

        :param val: value to convert
        :type val: bool, int, str
        :return: boolean corresponding to value ::

        {'1': True, 'yes': True, 'true': True, 'on': True,
         '0': False, 'no': False, 'false': False, 'off': False}

        :rtype: bool
        """
        # Already a boolean: nothing to convert
        if isinstance(val, bool):
            return val

        truthy = {'1', 'yes', 'true', 'on'}
        falsy = {'0', 'no', 'false', 'off'}

        val = unique_value(val).lower()
        if val in truthy:
            return True
        if val in falsy:
            return False
        raise PythonizeError("Cannot convert '%s' to a boolean value" % val)
|
class BoolProp(Property):
'''A Boolean Property.
Boolean values are currently case insensitively defined as 0,
false, no, off for False, and 1, true, yes, on for True).
'''
def pythonize(self, val):
'''Convert value into a boolean
:param val: value to convert
:type val: bool, int, str
:return: boolean corresponding to value ::
{'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
:rtype: bool
'''
pass
| 2 | 2 | 22 | 5 | 9 | 8 | 3 | 1.2 | 1 | 3 | 1 | 0 | 1 | 0 | 1 | 4 | 28 | 6 | 10 | 3 | 8 | 12 | 9 | 3 | 7 | 3 | 2 | 1 | 3 |
4,026 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.IntListProp
|
class IntListProp(ListProp):
    """Property holding a list of integers"""

    def pythonize(self, val):
        """Cast a raw configuration value to a list of integers.

        The value is first pythonized as a plain list, then every
        element is converted with ``int()``.

        :param val: value to convert
        :type val:
        :return: integer list corresponding to value
        :rtype: list[int]
        """
        as_list = super(IntListProp, self).pythonize(val)
        try:
            return list(map(int, as_list))
        except ValueError as value_except:
            raise PythonizeError(str(value_except))
|
class IntListProp(ListProp):
'''Integer List property'''
def pythonize(self, val):
'''Convert value into a integer list::
* Try to convert into a list
* Convert each element into a int
:param val: value to convert
:type val:
:return: integer list corresponding to value
:rtype: list[int]
'''
pass
| 2 | 2 | 16 | 2 | 6 | 8 | 2 | 1.29 | 1 | 5 | 1 | 0 | 1 | 0 | 1 | 5 | 18 | 2 | 7 | 3 | 5 | 9 | 7 | 2 | 5 | 2 | 3 | 1 | 2 |
4,027 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/servicedependency.py
|
alignak.objects.servicedependency.Servicedependency
|
class Servicedependency(Item):
    """Servicedependency class is a simple implementation of service dependency as
    defined in a monitoring context (dependency period, notification_failure_criteria ..)
    """
    my_type = "servicedependency"
    my_name_property = "service_relation"
    my_index_property = "service_relation"

    properties = Item.properties.copy()
    properties.update({
        'dependent_host_name':
            StringProp(),
        'dependent_hostgroup_name':
            StringProp(default=''),
        'dependent_service_description':
            StringProp(),
        'host_name':
            StringProp(),
        'hostgroup_name':
            StringProp(default=''),
        'service_description':
            StringProp(),
        'inherits_parent':
            BoolProp(default=False),
        'execution_failure_criteria':
            ListProp(default=['n'], split_on_comma=True),
        'notification_failure_criteria':
            ListProp(default=['n'], split_on_comma=True),
        'dependency_period':
            StringProp(default=''),
        'explode_hostgroup':
            BoolProp(default=False)
    })

    @property
    def service_relation(self):
        """Unique key for a service dependency

        Missing attributes fall back to 'unknown' (or 'independant' for
        the dependent host).

        :return: host_name/service -> dependent_host_name/service string
        :rtype: str
        """
        return "{}/{}->{}/{}".format(getattr(self, 'host_name', 'unknown'),
                                     getattr(self, 'service_description', 'unknown'),
                                     getattr(self, 'dependent_host_name', 'independant'),
                                     getattr(self, 'dependent_service_description', 'unknown'))

    def get_full_name(self):
        """Get the name of this dependency.

        Templates answer with their template name; concrete items answer
        with the same relation string as `service_relation`.

        :return: dependent_host_name/dependent_service_description..host_name/service_description
        :rtype: str
        """
        if self.is_a_template():
            return self.get_name()
        # Reuse the unique relation key instead of duplicating the format
        return self.service_relation
|
class Servicedependency(Item):
'''Servicedependency class is a simple implementation of service dependency as
defined in a monitoring context (dependency period, notification_failure_criteria ..)
'''
@property
def service_relation(self):
'''Unique key for a service dependency
:return: Tuple with host_name/service and dependent_host_name/service
:rtype: tuple
'''
pass
def get_full_name(self):
'''Get name based on 4 class attributes
Each attribute is replaced with 'unknown' if attribute is not set
:return: dependent_host_name/dependent_service_description..host_name/service_description
:rtype: str
'''
pass
| 4 | 3 | 12 | 1 | 6 | 5 | 2 | 0.4 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 36 | 66 | 7 | 42 | 8 | 38 | 17 | 12 | 7 | 9 | 2 | 3 | 1 | 3 |
4,028 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestContact
|
class TestContact(PropertiesTester, AlignakTest):
    """Check the declared properties of the Contact item."""

    # Properties declared but not exercised by this test
    unused_props = []

    # Properties without a default value: they must be explicitly configured
    without_default = [
        'contact_name'
    ]

    # Expected default value for every remaining Contact property
    properties = dict([
        ('alias', ''),
        ('host_notification_commands', []),
        ('service_notification_commands', []),
        ('host_notification_period', ''),
        ('service_notification_period', ''),
        ('service_notification_options', []),
        ('host_notification_options', []),
        ('imported_from', 'alignak-self'),
        ('use', []),
        ('register', True),
        ('definition_order', 100),
        ('name', ''),
        ('contactgroups', []),
        ('host_notifications_enabled', True),
        ('service_notifications_enabled', True),
        ('min_business_impact', 0),
        ('email', 'none'),
        ('pager', 'none'),
        ('address1', 'none'),
        ('address2', 'none'),
        ('address3', 'none'),
        ('address4', 'none'),
        ('address5', 'none'),
        ('address6', 'none'),
        ('can_submit_commands', False),
        ('is_admin', False),
        ('expert', False),
        ('retain_status_information', True),
        ('notificationways', []),
        ('password', 'NOPASSWORDSET'),
    ])

    def setUp(self):
        # Build the item under test; local import keeps the class load
        # inside the test setup
        super(TestContact, self).setUp()

        from alignak.objects.contact import Contact
        self.item = Contact({}, parsing=True)
|
class TestContact(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 45 | 4 | 41 | 7 | 38 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,029 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestServicegroup
|
class TestServicegroup(PropertiesTester, AlignakTest):
    """Check the declared properties of the Servicegroup item."""

    # Properties declared but not exercised by this test
    unused_props = []

    # Properties without a default value: they must be explicitly configured
    without_default = ['servicegroup_name']

    # Expected default value for every remaining Servicegroup property
    properties = dict([
        ('alias', ''),
        ('members', []),
        ('imported_from', 'alignak-self'),
        ('use', []),
        ('register', True),
        ('definition_order', 100),
        ('name', ''),
        # ('unknown_members', []),
        ('notes', ''),
        ('notes_url', ''),
        ('action_url', ''),
        ('servicegroup_members', []),
    ])

    def setUp(self):
        # Build the item under test; local import keeps the class load
        # inside the test setup
        super(TestServicegroup, self).setUp()

        from alignak.objects.servicegroup import Servicegroup
        self.item = Servicegroup({}, parsing=True)
|
class TestServicegroup(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0.05 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 25 | 4 | 20 | 7 | 17 | 1 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,030 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/serviceescalation.py
|
alignak.objects.serviceescalation.Serviceescalations
|
class Serviceescalations(Items):
    """Serviceescalations manage a list of Serviceescalation objects, used for parsing configuration
    """
    name_property = ""
    inner_class = Serviceescalation

    def explode(self, escalations):
        """Create instance of Escalation for each ServiceEscalation object

        :param escalations: list of escalation, used to add new ones
        :type escalations: alignak.objects.escalation.Escalations
        :return: None
        """
        # Turn every (host_name, service_description) escalation into a
        # generic Escalation object
        for src_escalation in self:
            creation_dict = {
                'escalation_name':
                    'Generated-SE-%s-%s' % (getattr(src_escalation, 'host_name', ''),
                                            src_escalation.uuid)
            }
            # Copy over every property the source escalation actually defines
            creation_dict.update({
                prop: getattr(src_escalation, prop)
                for prop in src_escalation.__class__.properties
                if hasattr(src_escalation, prop)
            })
            escalations.add_escalation(Escalation(creation_dict))
|
class Serviceescalations(Items):
'''Serviceescalations manage a list of Serviceescalation objects, used for parsing configuration
'''
def explode(self, escalations):
'''Create instance of Escalation for each ServiceEscalation object
:param escalations: list of escalation, used to add new ones
:type escalations: alignak.objects.escalation.Escalations
:return: None
'''
pass
| 2 | 2 | 20 | 2 | 12 | 6 | 4 | 0.53 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 46 | 27 | 4 | 15 | 9 | 13 | 8 | 12 | 9 | 10 | 4 | 2 | 3 | 4 |
4,031 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/serviceextinfo.py
|
alignak.objects.serviceextinfo.ServicesExtInfo
|
class ServicesExtInfo(Items):
    """ServicesExtInfo manage ServiceExtInfo and propagate properties (listed before)
    into Services if necessary
    """
    name_property = "host_name"
    inner_class = ServiceExtInfo

    def merge(self, services):
        """Merge extended host information into services

        :param services: services list, to look for a specific one
        :type services: alignak.objects.service.Services
        :return: None
        """
        for extinfo in self:
            # Templates are never merged
            if hasattr(extinfo, 'register') and not getattr(extinfo, 'register'):
                continue
            # The extinfo name may carry several comma-separated host names
            for host_name in extinfo.get_name().split(","):
                service = services.find_srv_by_name_and_hostname(host_name,
                                                                 extinfo.service_description)
                if service is None:
                    continue
                # Fusion
                self.merge_extinfo(service, extinfo)

    @staticmethod
    def merge_extinfo(service, extinfo):
        """Merge extended host information into a service

        :param service: the service to edit
        :type service: alignak.objects.service.Service
        :param extinfo: the external info we get data from
        :type extinfo: alignak.objects.serviceextinfo.ServiceExtInfo
        :return: None
        """
        # service properties have precedence over serviceextinfo properties
        for prop in ('notes', 'notes_url', 'icon_image', 'icon_image_alt'):
            if getattr(service, prop) == '' and getattr(extinfo, prop) != '':
                setattr(service, prop, getattr(extinfo, prop))
|
class ServicesExtInfo(Items):
'''ServicesExtInfo manage ServiceExtInfo and propagate properties (listed before)
into Services if necessary
'''
def merge(self, services):
'''Merge extended host information into services
:param services: services list, to look for a specific one
:type services: alignak.objects.service.Services
:return: None
'''
pass
@staticmethod
def merge_extinfo(service, extinfo):
'''Merge extended host information into a service
:param service: the service to edit
:type service: alignak.objects.service.Service
:param extinfo: the external info we get data from
:type extinfo: alignak.objects.serviceextinfo.ServiceExtInfo
:return: None
'''
pass
| 4 | 3 | 16 | 1 | 8 | 8 | 4 | 0.95 | 1 | 0 | 0 | 0 | 1 | 0 | 2 | 47 | 42 | 5 | 19 | 12 | 15 | 18 | 17 | 11 | 14 | 5 | 2 | 3 | 8 |
4,032 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/servicegroup.py
|
alignak.objects.servicegroup.Servicegroup
|
class Servicegroup(Itemgroup):
    """
    Class to manage a servicegroup
    A servicegroup is used to group services
    """
    my_type = 'servicegroup'
    my_name_property = "%s_name" % my_type
    members_property = "members"
    group_members_property = "%s_members" % my_type

    properties = Itemgroup.properties.copy()
    properties.update({
        'servicegroup_name':
            StringProp(fill_brok=[FULL_STATUS]),
        'alias':
            StringProp(default=u'', fill_brok=[FULL_STATUS]),
        'servicegroup_members':
            ListProp(default=[], fill_brok=[FULL_STATUS], merging='join', split_on_comma=True),
        'notes':
            StringProp(default=u'', fill_brok=[FULL_STATUS]),
        'notes_url':
            StringProp(default=u'', fill_brok=[FULL_STATUS]),
        'action_url':
            StringProp(default=u'', fill_brok=[FULL_STATUS]),
    })

    macros = {
        'SERVICEGROUPNAME': 'servicegroup_name',
        'SERVICEGROUPALIAS': 'alias',
        'SERVICEGROUPMEMBERS': 'members',
        'SERVICEGROUPNOTES': 'notes',
        'SERVICEGROUPNOTESURL': 'notes_url',
        'SERVICEGROUPACTIONURL': 'action_url'
    }

    def get_services(self):
        """List the services that are members of this group

        :return: list of services (members)
        :rtype: list
        """
        return super(Servicegroup, self).get_members()

    def get_servicegroup_members(self):
        """List the service groups that are members of this group

        :return: list of services
        :rtype: list | str
        """
        return getattr(self, 'servicegroup_members', [])

    def get_services_by_explosion(self, servicegroups):
        # pylint: disable=access-member-before-definition
        """Recursively gather the services of this group and of all its
        sub-groups into the members container.

        :param servicegroups: servicegroups object
        :type servicegroups: alignak.objects.servicegroup.Servicegroups
        :return: return empty string or list of members
        :rtype: str or list
        """
        # Tag this group first so that a child exploding it again is harmless
        self.already_exploded = True

        # rec_tag is reset for every group before an explosion starts;
        # finding it already set means the group definitions form a loop
        if self.rec_tag:
            logger.error("[servicegroup::%s] got a loop in servicegroup definition",
                         self.get_name())
            return getattr(self, 'members', '')

        # Not a loop: tag this group and walk its sub-groups
        self.rec_tag = True
        for group_name in self.get_servicegroup_members():
            sub_group = servicegroups.find_by_name(group_name.strip())
            if sub_group is None:
                continue
            sub_members = sub_group.get_services_by_explosion(servicegroups)
            if sub_members is not None:
                self.add_members(sub_members)

        return getattr(self, 'members', '')
|
class Servicegroup(Itemgroup):
'''
Class to manage a servicegroup
A servicegroup is used to group services
'''
def get_services(self):
'''Get the services of the group
:return: list of services (members)
:rtype: list
'''
pass
def get_servicegroup_members(self):
'''Get the groups members of the group
:return: list of services
:rtype: list | str
'''
pass
def get_services_by_explosion(self, servicegroups):
'''
Get all services of this servicegroup and add it in members container
:param servicegroups: servicegroups object
:type servicegroups: alignak.objects.servicegroup.Servicegroups
:return: return empty string or list of members
:rtype: str or list
'''
pass
| 4 | 4 | 18 | 3 | 8 | 8 | 3 | 0.53 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 47 | 92 | 14 | 51 | 16 | 47 | 27 | 30 | 16 | 26 | 7 | 4 | 3 | 9 |
4,033 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/servicegroup.py
|
alignak.objects.servicegroup.Servicegroups
|
class Servicegroups(Itemgroups):
    """
    Class to manage all servicegroups
    """
    inner_class = Servicegroup

    def add_member(self, service_name, servicegroup_name):
        """Add a member (service) to this servicegroup

        If the group does not exist yet, it is created with this single member.

        :param service_name: member (service) name
        :type service_name: str
        :param servicegroup_name: servicegroup name
        :type servicegroup_name: str
        :return: None
        """
        servicegroup = self.find_by_name(servicegroup_name)
        if servicegroup:
            servicegroup.add_members(service_name)
            return

        # Unknown group: create it on the fly with this single member
        servicegroup = Servicegroup({
            'servicegroup_name': servicegroup_name, 'members': service_name})
        self.add(servicegroup)

    def get_members_of_group(self, gname):
        """Get all members of a group which name is given in parameter

        :param gname: name of the group
        :type gname: str
        :return: list of the services in the group
        :rtype: list[alignak.objects.service.Service]
        """
        hostgroup = self.find_by_name(gname)
        if hostgroup:
            return hostgroup.get_services()
        # Unknown group: no members
        return []

    def linkify(self, hosts, services):
        """
        Link services with host

        :param hosts: hosts object
        :type hosts: alignak.objects.host.Hosts
        :param services: services object
        :type services: alignak.objects.service.Services
        :return: None
        """
        self.linkify_servicegroups_services(hosts, services)

    def linkify_servicegroups_services(self, hosts, services):
        """
        We just search for each host the id of the host
        and replace the name by the id
        TODO: very slow for high services, so search with host list,
        not service one

        The members list alternates host names (even indexes) and service
        descriptions (odd indexes): "host1,svc1,host2,svc2,...".

        :param hosts: hosts object
        :type hosts: alignak.objects.host.Hosts
        :param services: services object
        :type services: alignak.objects.service.Services
        :return: None
        """
        for servicegroup in self:
            members = servicegroup.get_services()
            # Only on item in the expected couple
            # (a lone member cannot form a host/service pair)
            if len(members) == 1 and members[0] != '':
                servicegroup.add_unknown_members('%s' % members[0])
                continue

            # The new member list, in id
            new_members = []
            seek = 0
            host_name = ''
            for member in members:
                if not member:
                    continue
                # Even position: host name; odd position: service description
                if seek % 2 == 0:
                    host_name = member.strip()
                else:
                    service_desc = member.strip()
                    find = services.find_srv_by_name_and_hostname(host_name, service_desc)
                    if find is not None:
                        new_members.append(find.uuid)
                    else:
                        # Service not found: only warn (not error) when the
                        # host explicitly excludes this service description
                        host = hosts.find_by_name(host_name)
                        if not (host and host.is_excluded_for_sdesc(service_desc)):
                            servicegroup.add_unknown_members('%s,%s' % (host_name, service_desc))
                        elif host:
                            self.add_warning('servicegroup %r : %s is excluded from the '
                                             'services of the host %s'
                                             % (servicegroup, service_desc, host_name))
                seek += 1

            # Make members uniq
            new_members = list(set(new_members))

            # We find the id, we replace the names
            servicegroup.replace_members(new_members)

            # Back-link each service to this group
            for srv_id in servicegroup.members:
                serv = services[srv_id]
                serv.servicegroups.append(servicegroup.uuid)
                # and make this uniq
                serv.servicegroups = list(set(serv.servicegroups))

    def explode(self):
        """
        Get services and put them in members container

        :return: None
        """
        # We do not want a same service group to be exploded again and again
        # so we tag it
        for tmp_sg in list(self.items.values()):
            tmp_sg.already_exploded = False

        for servicegroup in list(self.items.values()):
            if servicegroup.already_exploded:
                continue

            # get_services_by_explosion is a recursive
            # function, so we must tag hg so we do not loop
            for tmp_sg in list(self.items.values()):
                tmp_sg.rec_tag = False
            servicegroup.get_services_by_explosion(self)

        # We clean the tags
        for tmp_sg in list(self.items.values()):
            if hasattr(tmp_sg, 'rec_tag'):
                del tmp_sg.rec_tag
            del tmp_sg.already_exploded
|
class Servicegroups(Itemgroups):
'''
Class to manage all servicegroups
'''
def add_member(self, service_name, servicegroup_name):
'''Add a member (service) to this servicegroup
:param service_name: member (service) name
:type service_name: str
:param servicegroup_name: servicegroup name
:type servicegroup_name: str
:return: None
'''
pass
def get_members_of_group(self, gname):
'''Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of the services in the group
:rtype: list[alignak.objects.service.Service]
'''
pass
def linkify(self, hosts, services):
'''
Link services with host
:param hosts: hosts object
:type hosts: alignak.objects.host.Hosts
:param services: services object
:type services: alignak.objects.service.Services
:return: None
'''
pass
def linkify_servicegroups_services(self, hosts, services):
'''
We just search for each host the id of the host
and replace the name by the id
TODO: very slow for high services, so search with host list,
not service one
:param hosts: hosts object
:type hosts: alignak.objects.host.Hosts
:param services: services object
:type services: alignak.objects.service.Services
:return: None
'''
pass
def explode(self):
'''
Get services and put them in members container
:return: None
'''
pass
| 6 | 6 | 25 | 3 | 12 | 9 | 4 | 0.77 | 1 | 3 | 1 | 0 | 5 | 0 | 5 | 51 | 133 | 20 | 64 | 22 | 58 | 49 | 58 | 22 | 52 | 10 | 3 | 5 | 22 |
4,034 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_business_correlator_recursive_bp_rules.py
|
tests.test_business_correlator_recursive_bp_rules.TestBusinessCorrelatorRecursive
|
class TestBusinessCorrelatorRecursive(AlignakTest):
    """Check that recursive business rule definitions are handled safely."""

    def setUp(self):
        # Load the recursive business-correlator configuration and keep a
        # shortcut on the scheduler
        super(TestBusinessCorrelatorRecursive, self).setUp()
        self.setup_with_file('cfg/cfg_business_correlator_recursive.cfg',
                             dispatching=True)
        assert self.conf_is_correct
        self._sched = self._scheduler

    def test_recursive(self):
        """ BR - recursive do not break python

        ht34-peret-2-dif0, son of ht34-peret-2
        ht34-peret-2-dif1, son of ht34-peret-2

        ht34-peret-2 host state is 2,1,1 of (fid0 | dif1)
        """
        # Get the standard hosts
        host_dif0 = self._sched.hosts.find_by_name("ht34-peret-2-dif0")
        host_dif0.act_depend_of = []  # no host checks on critical check results
        host_dif1 = self._sched.hosts.find_by_name("ht34-peret-2-dif1")
        host_dif1.act_depend_of = []  # no host checks on critical check results

        # Get the BR main host - not a real host but a BR one...
        host_main = self._sched.hosts.find_by_name("ht34-peret-2")
        host_main.act_depend_of = []  # no host checks on critical check results
        host_main.__class__.enable_problem_impacts_states_change = False

        # Is a Business Rule, not a simple host...
        assert host_main.got_business_rule
        assert host_main.business_rule is not None
        bp_rule = host_main.business_rule
        print(("Host BR: %s" % bp_rule))
        # Host BR:
        # "Op:None Val:(u'1', u'1', u'1') Sons:['
        #   "Op:of: Val:(u'2', u'1', u'1') Sons:['
        #       "Op:host Val:(u'0', u'0', u'0') Sons:['c832bb0ad22c4700b16697cccbb6b782'] IsNot:False",
        #       "Op:host Val:(u'0', u'0', u'0') Sons:['596b9f36d1e94848ab145e3b43464645'] IsNot:False"
        #   '] IsNot:False"
        # '] IsNot:False"

        # Put both sons DOWN/HARD and let the scheduler run
        self.scheduler_loop(3, [
            [host_dif0, 2, 'DOWN | value1=1 value2=2'],
            [host_dif1, 2, 'DOWN | rtt=10']
        ])
        print(("Host dif-0 state: %s / %s" % (host_dif0.state_type, host_dif0.state)))
        print(("Host dif-1 state: %s / %s" % (host_dif1.state_type, host_dif1.state)))
        assert 'DOWN' == host_dif0.state
        assert 'HARD' == host_dif0.state_type
        assert 'DOWN' == host_dif1.state
        assert 'HARD' == host_dif1.state_type

        # When all is ok, the BP rule state is 4: undetermined!
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 4 == state
|
class TestBusinessCorrelatorRecursive(AlignakTest):
def setUp(self):
pass
def test_recursive(self):
''' BR - recursive do not break python
ht34-peret-2-dif0, son of ht34-peret-2
ht34-peret-2-dif1, son of ht34-peret-2
ht34-peret-2 host state is 2,1,1 of (fid0 | dif1)
'''
pass
| 3 | 1 | 26 | 3 | 15 | 10 | 1 | 0.61 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 57 | 55 | 8 | 31 | 9 | 28 | 19 | 27 | 9 | 24 | 1 | 2 | 0 | 2 |
4,035 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_business_correlator_output.py
|
tests.test_business_correlator_output.TestBusinesscorrelOutput
|
class TestBusinesscorrelOutput(AlignakTest):
def setUp(self):
super(TestBusinesscorrelOutput, self).setUp()
self.setup_with_file('cfg/cfg_business_correlator_output.cfg',
dispatching=True)
assert self.conf_is_correct
self._sched = self._scheduler
def launch_internal_check(self, svc_br):
""" Launch an internal check for the business rule service provided """
# Launch an internal check
now = time.time()
self._sched.add(svc_br.launch_check(now - 1, self._sched.hosts, self._sched.services,
self._sched.timeperiods, self._sched.macromodulations,
self._sched.checkmodulations, self._sched.checks))
c = svc_br.actions[0]
assert True == c.internal
assert c.is_launchable(now)
# ask the scheduler to launch this check
# and ask 2 loops: one to launch the check
# and another to get the result
self.scheduler_loop(2, [])
# We should not have the check anymore
assert 0 == len(svc_br.actions)
def test_bprule_empty_output(self):
""" BR - empty output """
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy",
"empty_bp_rule_output")
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert "" == svc_cor.get_business_rule_output(self._sched.hosts,
self._sched.services,
self._sched.macromodulations,
self._sched.timeperiods)
def test_bprule_expand_template_macros(self):
""" BR - expand template macros"""
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy",
"formatted_bp_rule_output")
svc_cor.act_depend_of = [] # no host checks on critical check results
# Is a Business Rule, not a simple service...
assert svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert "$STATUS$ $([$STATUS$: $FULLNAME$] )$" == \
svc_cor.business_rule_output_template
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # no host checks on critical check results
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # no host checks on critical check results
svc3 = self._sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
svc3.act_depend_of = [] # no host checks on critical check results
hst4 = self._sched.hosts.find_by_name("test_host_04")
hst4.act_depend_of = [] # no host checks on critical check results
self.scheduler_loop(3, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 1, 'WARNING test_host_02/srv2'],
[svc3, 2, 'CRITICAL test_host_03/srv3'],
[hst4, 2, 'DOWN test_host_04']])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'WARNING' == svc2.state
assert 'HARD' == svc2.state_type
assert 'CRITICAL' == svc3.state
assert 'HARD' == svc3.state_type
assert 'DOWN' == hst4.state
assert 'HARD' == hst4.state_type
time.sleep(1)
# Launch an internal check
self.launch_internal_check(svc_cor)
# Performs checks
m = MacroResolver()
template = "$STATUS$,$SHORTSTATUS$,$HOSTNAME$,$SERVICEDESC$,$FULLNAME$"
host = self._sched.hosts[svc1.host]
data = [host, svc1]
output = m.resolve_simple_macros_in_string(template, data,
self._sched.macromodulations,
self._sched.timeperiods)
assert "OK,O,test_host_01,srv1,test_host_01/srv1" == output
host = self._sched.hosts[svc2.host]
data = [host, svc2]
output = m.resolve_simple_macros_in_string(template, data,
self._sched.macromodulations,
self._sched.timeperiods)
assert "WARNING,W,test_host_02,srv2,test_host_02/srv2" == output
host = self._sched.hosts[svc3.host]
data = [host, svc3]
output = m.resolve_simple_macros_in_string(template, data,
self._sched.macromodulations,
self._sched.timeperiods)
assert "CRITICAL,C,test_host_03,srv3,test_host_03/srv3" == output
data = [hst4]
output = m.resolve_simple_macros_in_string(template, data,
self._sched.macromodulations,
self._sched.timeperiods)
assert "DOWN,D,test_host_04,,test_host_04" == output
host = self._sched.hosts[svc_cor.host]
data = [host, svc_cor]
output = m.resolve_simple_macros_in_string(template, data,
self._sched.macromodulations,
self._sched.timeperiods)
assert "CRITICAL,C,dummy,formatted_bp_rule_output,dummy/formatted_bp_rule_output" == \
output
def test_bprule_output(self):
""" BR - output """
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy",
"formatted_bp_rule_output")
svc_cor.act_depend_of = [] # no host checks on critical check results
# Is a Business Rule, not a simple service...
assert svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert "$STATUS$ $([$STATUS$: $FULLNAME$] )$" == \
svc_cor.business_rule_output_template
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # no host checks on critical check results
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # no host checks on critical check results
svc3 = self._sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
svc3.act_depend_of = [] # no host checks on critical check results
hst4 = self._sched.hosts.find_by_name("test_host_04")
hst4.act_depend_of = [] # no host checks on critical check results
self.scheduler_loop(3, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 1, 'WARNING test_host_02/srv2'],
[svc3, 2, 'CRITICAL test_host_03/srv3'],
[hst4, 2, 'DOWN test_host_04']])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'WARNING' == svc2.state
assert 'HARD' == svc2.state_type
assert 'CRITICAL' == svc3.state
assert 'HARD' == svc3.state_type
assert 'DOWN' == hst4.state
assert 'HARD' == hst4.state_type
time.sleep(1)
# Launch an internal check
self.launch_internal_check(svc_cor)
# Performs checks
output = svc_cor.output
print(("BR output: %s" % output))
assert output.find("[WARNING: test_host_02/srv2]") > 0
assert output.find("[CRITICAL: test_host_03/srv3]") > 0
assert output.find("[DOWN: test_host_04]") > 0
# Should not display OK state checks
assert -1 == output.find("[OK: test_host_01/srv1]")
assert output.startswith("CRITICAL")
def test_bprule_xof_one_critical_output(self):
""" BR 3 of: - one CRITICAL output """
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy",
"formatted_bp_rule_xof_output")
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert "$STATUS$ $([$STATUS$: $FULLNAME$] )$" == \
svc_cor.business_rule_output_template
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # no host checks on critical check results
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # no host checks on critical check results
svc3 = self._sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
svc3.act_depend_of = [] # no host checks on critical check results
hst4 = self._sched.hosts.find_by_name("test_host_04")
hst4.act_depend_of = [] # no host checks on critical check results
self.scheduler_loop(3, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 0, 'OK test_host_02/srv2'],
[svc3, 2, 'CRITICAL test_host_03/srv3'],
[hst4, 0, 'UP test_host_04']])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'OK' == svc2.state
assert 'HARD' == svc2.state_type
assert 'CRITICAL' == svc3.state
assert 'HARD' == svc3.state_type
assert 'UP' == hst4.state
assert 'HARD' == hst4.state_type
time.sleep(1)
# Launch an internal check
self.launch_internal_check(svc_cor)
# Performs checks
assert 0 == svc_cor.business_rule.get_state(self._sched.hosts,
self._sched.services)
assert "OK [CRITICAL: test_host_03/srv3]" == svc_cor.output
def test_bprule_xof_all_ok_output(self):
""" BR - 3 of: all OK output """
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy",
"formatted_bp_rule_xof_output")
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert "$STATUS$ $([$STATUS$: $FULLNAME$] )$" == \
svc_cor.business_rule_output_template
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # no host checks on critical check results
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # no host checks on critical check results
svc3 = self._sched.services.find_srv_by_name_and_hostname("test_host_03", "srv3")
svc3.act_depend_of = [] # no host checks on critical check results
hst4 = self._sched.hosts.find_by_name("test_host_04")
hst4.act_depend_of = [] # no host checks on critical check results
self.scheduler_loop(3, [
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 0, 'OK test_host_02/srv2'],
[svc3, 0, 'OK test_host_03/srv3'],
[hst4, 0, 'UP test_host_04']])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'OK' == svc2.state
assert 'HARD' == svc2.state_type
assert 'OK' == svc3.state
assert 'HARD' == svc3.state_type
assert 'UP' == hst4.state
assert 'HARD' == hst4.state_type
time.sleep(1)
# Launch an internal check
self.launch_internal_check(svc_cor)
# Performs checks
assert 0 == svc_cor.business_rule.get_state(self._sched.hosts,
self._sched.services)
assert "OK all checks were successful." == svc_cor.output
|
class TestBusinesscorrelOutput(AlignakTest):
def setUp(self):
pass
def launch_internal_check(self, svc_br):
''' Launch an internal check for the business rule service provided '''
pass
def test_bprule_empty_output(self):
''' BR - empty output '''
pass
def test_bprule_expand_template_macros(self):
''' BR - expand template macros'''
pass
def test_bprule_output(self):
''' BR - output '''
pass
def test_bprule_xof_one_critical_output(self):
''' BR 3 of: - one CRITICAL output '''
pass
def test_bprule_xof_all_ok_output(self):
''' BR - 3 of: all OK output '''
pass
| 8 | 6 | 34 | 3 | 27 | 6 | 1 | 0.21 | 1 | 2 | 1 | 0 | 7 | 1 | 7 | 62 | 245 | 30 | 193 | 38 | 185 | 40 | 149 | 38 | 141 | 1 | 2 | 0 | 7 |
4,036 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestServiceextinfo
|
class TestServiceextinfo(PropertiesTester, AlignakTest):
unused_props = []
without_default = ['host_name', 'service_description']
properties = dict([
('imported_from', 'alignak-self'),
('use', []),
('register', True),
('definition_order', 100),
('name', ''),
('notes', ''),
('notes_url', ''),
('icon_image', ''),
('icon_image_alt', ''),
])
def setUp(self):
super(TestServiceextinfo, self).setUp()
from alignak.objects.serviceextinfo import ServiceExtInfo
self.item = ServiceExtInfo({}, parsing=True)
|
class TestServiceextinfo(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 22 | 4 | 18 | 7 | 15 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,037 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_business_correlator_notifications.py
|
tests.test_business_correlator_notifications.TestBusinesscorrelNotifications
|
class TestBusinesscorrelNotifications(AlignakTest):
def setUp(self):
super(TestBusinesscorrelNotifications, self).setUp()
self.setup_with_file('cfg/cfg_business_correlator_notifications.cfg',
dispatching=True)
def test_bprule_standard_notifications(self):
"""Standard notifications for BP rules"""
svc_cor = self._scheduler.services.find_srv_by_name_and_hostname("dummy", "bp_rule_default")
svc_cor.act_depend_of = []
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert False is svc_cor.business_rule_smart_notifications
dummy = self._scheduler.hosts.find_by_name("dummy")
svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # ignore the host dependency
svc2 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # ignore the host dependency
self.scheduler_loop(2, [
[dummy, 0, 'UP dummy'],
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 2, 'CRITICAL test_host_02/srv2']])
# HARD/CRITICAL so it is now a problem
assert svc2.is_problem
assert 2 == svc_cor.business_rule.get_state(self._scheduler.hosts,
self._scheduler.services)
# Acknowledge the faulty service
now = time.time()
cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_02;srv2;2;1;1;lausser;blablub" % (now)
self._scheduler.run_external_commands([cmd])
self.external_command_loop()
assert True is svc2.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc_cor, None, None]])
self.scheduler_loop(1, [[svc_cor, None, None]])
# The BR is now OK
assert 0 == svc_cor.business_rule.get_state(self._scheduler.hosts,
self._scheduler.services)
timeperiod = self._scheduler.timeperiods[svc_cor.notification_period]
# Notification is not blocked because all is ok
assert False is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
def test_bprule_smart_notifications_ack(self):
"""Smart notifications for BP rules"""
svc_cor = self._scheduler.services.find_srv_by_name_and_hostname("dummy", "bp_rule_smart_notif")
svc_cor.act_depend_of = []
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert True is svc_cor.business_rule_smart_notifications
dummy = self._scheduler.hosts.find_by_name("dummy")
svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # ignore the host dependency
svc2 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # ignore the host dependency
self.scheduler_loop(2, [
[dummy, 0, 'UP dummy'],
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 2, 'CRITICAL test_host_02/srv2']])
# HARD/CRITICAL so it is now a problem
assert svc2.is_problem
assert 2 == svc_cor.business_rule.get_state(self._scheduler.hosts, self._scheduler.services)
timeperiod = self._scheduler.timeperiods[svc_cor.notification_period]
# Notification is not blocked
assert False is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
now = time.time()
cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_02;srv2;2;1;1;lausser;blablub" % (now)
self._scheduler.run_external_commands([cmd])
assert True is svc2.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc_cor, None, None]])
self.scheduler_loop(1, [[svc_cor, None, None]])
# Notification is blocked because service is acknowledged
assert True is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
def test_bprule_smart_notifications_svc_ack_downtime(self):
"""Smart notifications for BP rules - ack / downtime"""
svc_cor = self._scheduler.services.find_srv_by_name_and_hostname("dummy", "bp_rule_smart_notif")
svc_cor.act_depend_of = []
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert True is svc_cor.business_rule_smart_notifications
assert False is svc_cor.business_rule_downtime_as_ack
dummy = self._scheduler.hosts.find_by_name("dummy")
svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # ignore the host dependency
svc2 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # ignore the host dependency
self.scheduler_loop(2, [
[dummy, 0, 'UP dummy'],
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 2, 'CRITICAL test_host_02/srv2']])
assert 2 == svc_cor.business_rule.get_state(self._scheduler.hosts,
self._scheduler.services)
timeperiod = self._scheduler.timeperiods[svc_cor.notification_period]
host = self._scheduler.hosts[svc_cor.host]
assert False is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
duration = 600
now = time.time()
# fixed downtime valid for the next 10 minutes
cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_02;srv2;%d;%d;1;;%d;lausser;blablub" % (
now, now, now + duration, duration
)
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc_cor, None, None]])
self.scheduler_loop(1, [[svc_cor, None, None]])
assert svc2.scheduled_downtime_depth > 0
assert False is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
# BR downtime is managed as an ack...
svc_cor.business_rule_downtime_as_ack = True
self.scheduler_loop(1, [[svc_cor, None, None]])
self.scheduler_loop(1, [[svc_cor, None, None]])
# ...s notifiction is blocked
assert True is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
def test_bprule_smart_notifications_hst_ack_downtime(self):
svc_cor = self._scheduler.services.find_srv_by_name_and_hostname("dummy", "bp_rule_smart_notif")
svc_cor.act_depend_of = []
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert True is svc_cor.business_rule_smart_notifications
assert False is svc_cor.business_rule_downtime_as_ack
dummy = self._scheduler.hosts.find_by_name("dummy")
svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # ignore the host dependency
svc2 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # ignore the host dependency
hst2 = self._scheduler.hosts.find_by_name("test_host_02")
self.scheduler_loop(2, [
[dummy, 0, 'UP dummy'],
[svc1, 0, 'OK test_host_01/srv1'],
[svc2, 2, 'CRITICAL test_host_02/srv2']])
assert 2 == svc_cor.business_rule.get_state(self._scheduler.hosts,
self._scheduler.services)
timeperiod = self._scheduler.timeperiods[svc_cor.notification_period]
host = self._scheduler.hosts[svc_cor.host]
assert False is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
duration = 600
now = time.time()
# fixed downtime valid for the next 10 minutes
cmd = "[%lu] SCHEDULE_HOST_DOWNTIME;test_host_02;%d;%d;1;;%d;lausser;blablub" % (
now, now, now + duration, duration
)
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc_cor, None, None]])
self.scheduler_loop(1, [[svc_cor, None, None]])
assert hst2.scheduled_downtime_depth > 0
# Notification is blocked because the downtime also set an acknowledge
svc_cor.business_rule_downtime_as_ack = True
assert True is svc_cor.is_blocking_notifications(timeperiod, self._scheduler.hosts,
self._scheduler.services, 'PROBLEM',
time.time())
def test_bprule_child_notification_options(self):
"""BR child notification options"""
svc_cor = self._scheduler.services.find_srv_by_name_and_hostname("dummy", "bp_rule_child_notif")
svc_cor.act_depend_of = []
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
hst2 = self._scheduler.hosts.find_by_name("test_host_02")
assert ['w', 'u', 'c', 'r', 's', 'x'] == svc1.notification_options
assert ['d', 'x', 'r', 's'] == hst2.notification_options
|
class TestBusinesscorrelNotifications(AlignakTest):
def setUp(self):
pass
def test_bprule_standard_notifications(self):
'''Standard notifications for BP rules'''
pass
def test_bprule_smart_notifications_ack(self):
'''Smart notifications for BP rules'''
pass
def test_bprule_smart_notifications_svc_ack_downtime(self):
'''Smart notifications for BP rules - ack / downtime'''
pass
def test_bprule_smart_notifications_hst_ack_downtime(self):
pass
def test_bprule_child_notification_options(self):
'''BR child notification options'''
pass
| 7 | 4 | 33 | 5 | 25 | 4 | 1 | 0.16 | 1 | 1 | 0 | 0 | 6 | 0 | 6 | 61 | 206 | 37 | 153 | 43 | 146 | 24 | 116 | 43 | 109 | 1 | 2 | 0 | 6 |
4,038 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_business_correlator.py
|
tests.test_business_correlator.TestBusinessCorrelator
|
class TestBusinessCorrelator(AlignakTest):
def setUp(self):
super(TestBusinessCorrelator, self).setUp()
self.setup_with_file('cfg/cfg_business_correlator.cfg',
dispatching=True)
assert self.conf_is_correct
self._sched = self._scheduler
def launch_internal_check(self, svc_br):
""" Launch an internal check for the business rule service provided """
# Launch an internal check
now = time.time()
self._sched.add(svc_br.launch_check(
now - 1, self._sched.hosts, self._sched.services,
self._sched.timeperiods, self._sched.macromodulations,
self._sched.checkmodulations, self._sched.checks))
c = svc_br.actions[0]
assert True == c.internal
assert c.is_launchable(now)
# ask the scheduler to launch this check
# and ask 2 loops: one to launch the check
# and another to get the result
self.scheduler_loop(2, [])
# We should not have the check anymore
assert 0 == len(svc_br.actions)
def test_br_creation(self):
""" BR - check creation of a simple services OR (db1 OR db2)
:return:
"""
# Get the hosts
host = self._sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore its parent
router = self._sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore its parent
# Get the services
svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
# Not a BR, a simple service
assert not svc_db1.got_business_rule
assert svc_db1.business_rule is None
svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
# Not a BR, a simple service
assert not svc_db2.got_business_rule
assert svc_db2.business_rule is None
svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
svc_cor.act_depend_of = [] # no host checks on critical check results
# Is a Business Rule, not a simple service...
assert svc_cor.got_business_rule
assert svc_cor.business_rule is not None
svc_cor2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
# Is a Business Rule, not a simple service...
assert svc_cor2.got_business_rule
assert svc_cor2.business_rule is not None
# We check for good parent/childs links
# So svc_cor should be a son of svc_db1 and svc_db2
# and db1 and db2 should be parents of svc_cor
assert svc_cor.uuid in svc_db1.child_dependencies
assert svc_cor.uuid in svc_db2.child_dependencies
assert svc_db1.uuid in svc_cor.parent_dependencies
assert svc_db2.uuid in svc_cor.parent_dependencies
# Get the BR associated with svc_cor
# The BR command is: bp_rule!test_host_0,db1|test_host_0,db2
bp_rule = svc_cor.business_rule
assert isinstance(bp_rule, DependencyNode)
print(("BR scheduler: %s" % bp_rule))
# Get the BR associated with svc_cor
# The BR command is: bp_rule!test_host_0,db1|test_host_0,db2
bp_rule_arbiter = svc_cor2.business_rule
assert isinstance(bp_rule_arbiter, DependencyNode)
print(("BR arbiter: %s" % bp_rule_arbiter))
# Get the BR elements list
assert isinstance(bp_rule.list_all_elements(), list)
assert len(bp_rule.list_all_elements()) == 2
assert bp_rule.operand == '|'
assert bp_rule.of_values == ('2', '2', '2')
assert bp_rule.not_value == False
assert bp_rule.is_of_mul == False
assert bp_rule.sons is not None
# We've got 2 sons for the BR which are 2 dependency nodes
# Each dependency node has a son which is the service
assert 2 == len(bp_rule.sons)
# First son is linked to a service and we have its uuid
son = bp_rule.sons[0]
assert isinstance(son, DependencyNode)
assert son.operand == 'service'
assert son.of_values == ('0', '0', '0')
assert son.not_value == False
assert son.sons is not None
assert son.sons is not []
assert son.sons[0] == svc_db1.uuid
# Second son is also a service
son = bp_rule.sons[1]
assert isinstance(son, DependencyNode)
assert son.operand == 'service'
assert son.of_values == ('0', '0', '0')
assert son.not_value == False
assert son.sons is not None
assert son.sons is not []
assert son.sons[0] == svc_db2.uuid
def test_simple_or_business_correlator(self):
""" BR - try a simple services OR (db1 OR db2)
bp_rule!test_host_0,db1|test_host_0,db2
:return:
"""
now = time.time()
# Get the hosts
host = self._sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore its parent
router = self._sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore its parent
# Get the services
svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
svc_db1.act_depend_of = [] # no host checks on critical check results
# Not a BR, a simple service
assert not svc_db1.got_business_rule
assert svc_db1.business_rule is None
svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
svc_db2.act_depend_of = [] # no host checks on critical check results
# Not a BR, a simple service
assert not svc_db2.got_business_rule
assert svc_db2.business_rule is None
svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
svc_cor.act_depend_of = [] # no host checks on critical check results
# Is a Business Rule, not a simple service...
assert svc_cor.got_business_rule
assert svc_cor.business_rule is not None
# We check for good parent/childs links
# So svc_cor should be a son of svc_db1 and svc_db2
# and db1 and db2 should be parents of svc_cor
assert svc_cor.uuid in svc_db1.child_dependencies
assert svc_cor.uuid in svc_db2.child_dependencies
assert svc_db1.uuid in svc_cor.parent_dependencies
assert svc_db2.uuid in svc_cor.parent_dependencies
# Get the BR associated with svc_cor
bp_rule = svc_cor.business_rule
assert bp_rule.operand == '|'
assert bp_rule.of_values == ('2', '2', '2')
assert bp_rule.not_value == False
assert bp_rule.is_of_mul == False
assert bp_rule.sons is not None
assert 2 == len(bp_rule.sons)
# First son is linked to a service and we have its uuid
son = bp_rule.sons[0]
assert isinstance(son, DependencyNode)
assert son.operand == 'service'
assert son.of_values == ('0', '0', '0')
assert son.not_value == False
assert son.sons is not None
assert son.sons is not []
assert son.sons[0] == svc_db1.uuid
# Second son is also a service
son = bp_rule.sons[1]
assert isinstance(son, DependencyNode)
assert son.operand == 'service'
assert son.of_values == ('0', '0', '0')
assert son.not_value == False
assert son.sons is not None
assert son.sons is not []
assert son.sons[0] == svc_db2.uuid
# Now start working on the states
self.scheduler_loop(1, [
[svc_db1, 0, 'OK | rtt=10'],
[svc_db2, 0, 'OK | value1=1 value2=2']
])
assert 'OK' == svc_db1.state
assert 'HARD' == svc_db1.state_type
assert 'OK' == svc_db2.state
assert 'HARD' == svc_db2.state_type
# -----
# OK or OK -> OK
# -----
# When all is ok, the BP rule state is 0
state = bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == state
# Now we set the db1 as soft/CRITICAL
self.scheduler_loop(1, [
[svc_db1, 2, 'CRITICAL | value1=1 value2=2']
])
assert 'CRITICAL' == svc_db1.state
assert 'SOFT' == svc_db1.state_type
assert 0 == svc_db1.last_hard_state_id
# The business rule must still be 0 - only hard states are considered
state = bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == state
# Now we get db1 CRITICAL/HARD
self.scheduler_loop(1, [
[svc_db1, 2, 'CRITICAL | value1=1 value2=2']
])
assert 'CRITICAL' == svc_db1.state
assert 'HARD' == svc_db1.state_type
assert 2 == svc_db1.last_hard_state_id
# -----
# CRITICAL or OK -> OK
# -----
# The rule must still be a 0 (or inside)
state = bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == state
# Now we also set db2 as CRITICAL/HARD... byebye 0 :)
self.scheduler_loop(2, [
[svc_db2, 2, 'CRITICAL | value1=1 value2=2']
])
assert 'CRITICAL' == svc_db2.state
assert 'HARD' == svc_db2.state_type
assert 2 == svc_db2.last_hard_state_id
# -----
# CRITICAL or CRITICAL -> CRITICAL
# -----
# And now the state of the rule must be 2
state = bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 2 == state
# And If we set db2 to WARNING?
self.scheduler_loop(2, [
[svc_db2, 1, 'WARNING | value1=1 value2=2']
])
assert 'WARNING' == svc_db2.state
assert 'HARD' == svc_db2.state_type
assert 1 == svc_db2.last_hard_state_id
# -----
# CRITICAL or WARNING -> WARNING
# -----
# Must be WARNING (better no 0 value)
state = bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 1 == state
# We acknowledge db2
cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % now
self._sched.run_external_commands([cmd])
assert True == svc_db2.problem_has_been_acknowledged
# -----
# CRITICAL or ACK(WARNING) -> OK
# -----
# Must be OK (ACK(WARNING) is OK)
state = bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == state
# We unacknowledge then downtime db2
duration = 300
cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
self._sched.run_external_commands([cmd])
assert False == svc_db2.problem_has_been_acknowledged
cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
self._sched.run_external_commands([cmd])
self.scheduler_loop(1, [[svc_cor, None, None]])
assert svc_db2.scheduled_downtime_depth > 0
assert True == svc_db2.in_scheduled_downtime
# -----
# CRITICAL or DOWNTIME(WARNING) -> OK
# -----
# Must be OK (DOWNTIME(WARNING) is OK)
state = bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == state
    def test_simple_or_business_correlator_with_schedule(self):
        """ BR - try a simple services OR (db1 OR db2) with internal checks
        bp_rule!test_host_0,db1|test_host_0,db2
        Unlike the plain OR test, the BR service state is also refreshed by
        launching the BR internal check, so we assert on svc_cor state/state_type
        at each step (including the SOFT -> HARD transition of the BR itself).
        :return:
        """
        now = time.time()
        # Get the hosts
        host = self._sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore its parent
        router = self._sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore its parent
        # Get the services
        svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
        svc_db1.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db1.got_business_rule
        assert svc_db1.business_rule is None
        svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
        svc_db2.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db2.got_business_rule
        assert svc_db2.business_rule is None
        svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
        svc_cor.act_depend_of = []  # no host checks on critical check results
        # Is a Business Rule, not a simple service...
        assert svc_cor.got_business_rule
        assert svc_cor.business_rule is not None
        # We check for good parent/childs links
        # So svc_cor should be a son of svc_db1 and svc_db2
        # and db1 and db2 should be parents of svc_cor
        assert svc_cor.uuid in svc_db1.child_dependencies
        assert svc_cor.uuid in svc_db2.child_dependencies
        assert svc_db1.uuid in svc_cor.parent_dependencies
        assert svc_db2.uuid in svc_cor.parent_dependencies
        # Get the BR associated with svc_cor
        bp_rule = svc_cor.business_rule
        assert bp_rule.operand == '|'
        assert bp_rule.of_values == ('2', '2', '2')
        assert bp_rule.not_value == False
        assert bp_rule.is_of_mul == False
        assert bp_rule.sons is not None
        assert 2 == len(bp_rule.sons)
        # First son is linked to a service and we have its uuid
        son = bp_rule.sons[0]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db1.uuid
        # Second son is also a service
        son = bp_rule.sons[1]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db2.uuid
        # Now start working on the states
        self.scheduler_loop(1, [
            [svc_db1, 0, 'OK | rtt=10'],
            [svc_db2, 0, 'OK | value1=1 value2=2']
        ])
        assert 'OK' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 'OK' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        # -----
        # OK or OK -> OK
        # -----
        # When all is ok, the BP rule state is 0
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Launch an internal check
        self.launch_internal_check(svc_cor)
        # What is the svc_cor state now?
        assert 'OK' == svc_cor.state
        assert 'HARD' == svc_cor.state_type
        assert 0 == svc_cor.last_hard_state_id
        # Now we set the db1 as soft/CRITICAL
        self.scheduler_loop(1, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'SOFT' == svc_db1.state_type
        assert 0 == svc_db1.last_hard_state_id
        # The business rule must still be 0 (SOFT states are ignored)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Launch an internal check
        self.launch_internal_check(svc_cor)
        # What is the svc_cor state now?
        assert 'OK' == svc_cor.state
        assert 'HARD' == svc_cor.state_type
        assert 0 == svc_cor.last_hard_state_id
        # Now we get db1 CRITICAL/HARD
        self.scheduler_loop(1, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 2 == svc_db1.last_hard_state_id
        # The rule must still be a 0 (or inside)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Launch an internal check
        self.launch_internal_check(svc_cor)
        # What is the svc_cor state now?
        assert 'OK' == svc_cor.state
        assert 'HARD' == svc_cor.state_type
        assert 0 == svc_cor.last_hard_state_id
        # Now we also set db2 as CRITICAL/HARD... byebye 0 :)
        self.scheduler_loop(2, [
            [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 2 == svc_db2.last_hard_state_id
        # And now the state of the rule must be 2
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # Launch an internal check
        self.launch_internal_check(svc_cor)
        # What is the svc_cor state now?
        # The BR service itself goes through a SOFT state first...
        assert 'CRITICAL' == svc_cor.state
        assert 'SOFT' == svc_cor.state_type
        assert 0 == svc_cor.last_hard_state_id
        # Launch an internal check
        self.launch_internal_check(svc_cor)
        # What is the svc_cor state now?
        # ... and becomes HARD on the next internal check
        assert 'CRITICAL' == svc_cor.state
        assert 'HARD' == svc_cor.state_type
        assert 2 == svc_cor.last_hard_state_id
        # And If we set db2 to WARNING?
        self.scheduler_loop(2, [
            [svc_db2, 1, 'WARNING | value1=1 value2=2']
        ])
        assert 'WARNING' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 1 == svc_db2.last_hard_state_id
        # Must be WARNING (better no 0 value)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 1 == state
        # Launch an internal check
        self.launch_internal_check(svc_cor)
        # What is the svc_cor state now?
        assert 'WARNING' == svc_cor.state
        assert 'HARD' == svc_cor.state_type
        assert 1 == svc_cor.last_hard_state_id
        # Assert that Simple_Or Is an impact of the problem db2
        assert svc_cor.uuid in svc_db2.impacts
        # and db1 too
        assert svc_cor.uuid in svc_db1.impacts
        # We acknowledge db2
        cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % now
        self._sched.run_external_commands([cmd])
        assert True == svc_db2.problem_has_been_acknowledged
        # Must be OK
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # And in a HARD
        # Launch internal check
        self.launch_internal_check(svc_cor)
        assert 'OK' == svc_cor.state
        assert'HARD' == svc_cor.state_type
        assert 0 == svc_cor.last_hard_state_id
        # db2 WARNING, db1 CRITICAL, we unacknowledge then downtime db2
        duration = 300
        cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
        self._sched.run_external_commands([cmd])
        assert False == svc_db2.problem_has_been_acknowledged
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
        self._sched.run_external_commands([cmd])
        self.scheduler_loop(1, [[svc_cor, None, None]])
        assert svc_db2.scheduled_downtime_depth > 0
        assert True == svc_db2.in_scheduled_downtime
        # Must be OK
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # And in a HARD
        # Launch internal check
        self.launch_internal_check(svc_cor)
        assert 'OK' == svc_cor.state
        assert 'HARD'== svc_cor.state_type
        assert 0 == svc_cor.last_hard_state_id
    def test_simple_or_not_business_correlator(self):
        """ BR - try a simple services OR (db1 OR NOT db2)
        bp_rule!test_host_0,db1|!test_host_0,db2
        The second operand is negated: db2 in a non-OK state counts as OK
        for the rule, and db2 OK counts as CRITICAL.
        :return:
        """
        now = time.time()
        # Get the hosts
        host = self._sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore its parent
        router = self._sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore its parent
        # Get the services
        svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
        svc_db1.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db1.got_business_rule
        assert svc_db1.business_rule is None
        svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
        svc_db2.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db2.got_business_rule
        assert svc_db2.business_rule is None
        svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or_not")
        svc_cor.act_depend_of = []  # no host checks on critical check results
        # Is a Business Rule, not a simple service...
        assert svc_cor.got_business_rule
        assert svc_cor.business_rule is not None
        # We check for good parent/childs links
        # So svc_cor should be a son of svc_db1 and svc_db2
        # and db1 and db2 should be parents of svc_cor
        assert svc_cor.uuid in svc_db1.child_dependencies
        assert svc_cor.uuid in svc_db2.child_dependencies
        assert svc_db1.uuid in svc_cor.parent_dependencies
        assert svc_db2.uuid in svc_cor.parent_dependencies
        # Get the BR associated with svc_cor
        bp_rule = svc_cor.business_rule
        assert bp_rule.operand == '|'
        assert bp_rule.of_values == ('2', '2', '2')
        assert bp_rule.not_value == False
        assert bp_rule.is_of_mul == False
        assert bp_rule.sons is not None
        assert 2 == len(bp_rule.sons)
        # First son is linked to a service and we have its uuid
        son = bp_rule.sons[0]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db1.uuid
        # Second son is also a service
        son = bp_rule.sons[1]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        # This service is NOT valued
        assert son.not_value == True
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db2.uuid
        # Now start working on the states
        self.scheduler_loop(1, [
            [svc_db1, 0, 'OK | rtt=10'],
            [svc_db2, 0, 'OK | value1=1 value2=2']
        ])
        assert 'OK' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 'OK' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        # -----
        # OK or NOT OK -> OK
        # -----
        # When all is ok, the BP rule state is 0
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we set the db1 as soft/CRITICAL
        self.scheduler_loop(1, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'SOFT' == svc_db1.state_type
        assert 0 == svc_db1.last_hard_state_id
        # The business rule must still be 0 - only hard states are considered
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we get db1 CRITICAL/HARD
        self.scheduler_loop(1, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 2 == svc_db1.last_hard_state_id
        # -----
        # CRITICAL or NOT OK -> CRITICAL
        # -----
        # Must be CRITICAL: db1 is CRITICAL and NOT db2(OK) also counts as CRITICAL
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # Now we also set db2 as CRITICAL/HARD... byebye 0 :)
        self.scheduler_loop(2, [
            [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 2 == svc_db2.last_hard_state_id
        # -----
        # CRITICAL or NOT CRITICAL -> OK
        # -----
        # And now the state of the rule must be 0: NOT db2(CRITICAL) counts as OK
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # And If we set db2 WARNING?
        self.scheduler_loop(2, [
            [svc_db2, 1, 'WARNING | value1=1 value2=2']
        ])
        assert 'WARNING' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 1 == svc_db2.last_hard_state_id
        # -----
        # CRITICAL or NOT WARNING -> WARNING
        # -----
        # Must be WARNING (better no 0 value)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 1 == state
        # We acknowledge db2
        cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % now
        self._sched.run_external_commands([cmd])
        assert True == svc_db2.problem_has_been_acknowledged
        # -----
        # CRITICAL or NOT ACK(WARNING) -> CRITICAL
        # -----
        # Must be CRITICAL: ACK(WARNING) is OK, and NOT OK counts as CRITICAL
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # We unacknowledge then downtime db2
        duration = 300
        cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
        self._sched.run_external_commands([cmd])
        assert False == svc_db2.problem_has_been_acknowledged
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
        self._sched.run_external_commands([cmd])
        self.scheduler_loop(1, [[svc_cor, None, None]])
        assert svc_db2.scheduled_downtime_depth > 0
        assert True == svc_db2.in_scheduled_downtime
        # -----
        # CRITICAL or NOT DOWNTIME(WARNING) -> CRITICAL
        # -----
        # Must be CRITICAL (business_rule_downtime_as_ok -> DOWNTIME(WARNING) is OK)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
    def test_simple_and_business_correlator(self):
        """ BR - try a simple services AND (db1 AND db2)
        bp_rule!test_host_0,db1&test_host_0,db2
        The AND rule takes the worst state of its operands; ACK and DOWNTIME
        make a non-OK operand count as OK.
        :return:
        """
        now = time.time()
        # Get the hosts
        host = self._sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore its parent
        router = self._sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore its parent
        # Get the services
        svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
        svc_db1.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db1.got_business_rule
        assert svc_db1.business_rule is None
        svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
        svc_db2.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db2.got_business_rule
        assert svc_db2.business_rule is None
        svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0",
                                                                     "Simple_And")
        svc_cor.act_depend_of = []  # no host checks on critical check results
        # Is a Business Rule, not a simple service...
        assert svc_cor.got_business_rule
        assert svc_cor.business_rule is not None
        # We check for good parent/childs links
        # So svc_cor should be a son of svc_db1 and svc_db2
        # and db1 and db2 should be parents of svc_cor
        assert svc_cor.uuid in svc_db1.child_dependencies
        assert svc_cor.uuid in svc_db2.child_dependencies
        assert svc_db1.uuid in svc_cor.parent_dependencies
        assert svc_db2.uuid in svc_cor.parent_dependencies
        # Get the BR associated with svc_cor
        bp_rule = svc_cor.business_rule
        assert bp_rule.operand == '&'
        assert bp_rule.of_values == ('2', '2', '2')
        assert bp_rule.not_value == False
        assert bp_rule.is_of_mul == False
        assert bp_rule.sons is not None
        assert 2 == len(bp_rule.sons)
        # First son is linked to a service and we have its uuid
        son = bp_rule.sons[0]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db1.uuid
        # Second son is also a service
        son = bp_rule.sons[1]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db2.uuid
        # Now start working on the states
        self.scheduler_loop(1, [
            [svc_db1, 0, 'OK | rtt=10'],
            [svc_db2, 0, 'OK | value1=1 value2=2']
        ])
        assert 'OK' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 'OK' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        # -----
        # OK and OK -> OK
        # -----
        # When all is ok, the BP rule state is 0
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we set the db1 as soft/CRITICAL
        self.scheduler_loop(1, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'SOFT' == svc_db1.state_type
        assert 0 == svc_db1.last_hard_state_id
        # The business rule must still be 0 because we want HARD states
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we get db1 CRITICAL/HARD
        self.scheduler_loop(2, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 2 == svc_db1.last_hard_state_id
        # -----
        # CRITICAL and OK -> CRITICAL
        # -----
        # The rule must go CRITICAL
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # Now we set db2 as WARNING/HARD...
        self.scheduler_loop(2, [
            [svc_db2, 1, 'WARNING | value1=1 value2=2']
        ])
        assert 'WARNING' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 1 == svc_db2.last_hard_state_id
        # -----
        # CRITICAL and WARNING -> CRITICAL
        # -----
        # The state of the rule remains 2
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # And If we set db1 to WARNING too?
        self.scheduler_loop(2, [
            [svc_db1, 1, 'WARNING | value1=1 value2=2']
        ])
        assert 'WARNING' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 1 == svc_db1.last_hard_state_id
        # -----
        # WARNING and WARNING -> WARNING
        # -----
        # Must be WARNING (worst of the two non-OK values)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 1 == state
        # We set db2 CRITICAL then we acknowledge it
        self.scheduler_loop(2, [
            [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 2 == svc_db2.last_hard_state_id
        cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % now
        self._sched.run_external_commands([cmd])
        assert True == svc_db2.problem_has_been_acknowledged
        # -----
        # WARNING and ACK(CRITICAL) -> WARNING
        # -----
        # Must be WARNING (ACK(CRITICAL) is OK)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 1 == state
        # We unacknowledge then downtime db2
        duration = 300
        cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
        self._sched.run_external_commands([cmd])
        assert False == svc_db2.problem_has_been_acknowledged
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
        self._sched.run_external_commands([cmd])
        self.scheduler_loop(1, [[svc_cor, None, None]])
        assert svc_db2.scheduled_downtime_depth > 0
        assert True == svc_db2.in_scheduled_downtime
        # -----
        # WARNING and DOWNTIME(CRITICAL) -> WARNING
        # -----
        # Must be WARNING (DOWNTIME(CRITICAL) is OK, db1 is still WARNING)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 1 == state
    def test_simple_and_not_business_correlator(self):
        """ BR - try a simple services AND NOT (db1 AND NOT db2)
        bp_rule!test_host_0,db1&!test_host_0,db2
        The second operand is negated: the rule expects db2 to be non-OK,
        so db2 OK makes the rule go CRITICAL.
        """
        now = time.time()
        # Get the hosts
        host = self._sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore its parent
        router = self._sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore its parent
        # Get the services
        svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
        svc_db1.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db1.got_business_rule
        assert svc_db1.business_rule is None
        svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
        svc_db2.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db2.got_business_rule
        assert svc_db2.business_rule is None
        svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0",
                                                                     "Simple_And_not")
        svc_cor.act_depend_of = []  # no host checks on critical check results
        # Is a Business Rule, not a simple service...
        assert svc_cor.got_business_rule
        assert svc_cor.business_rule is not None
        # We check for good parent/childs links
        # So svc_cor should be a son of svc_db1 and svc_db2
        # and db1 and db2 should be parents of svc_cor
        assert svc_cor.uuid in svc_db1.child_dependencies
        assert svc_cor.uuid in svc_db2.child_dependencies
        assert svc_db1.uuid in svc_cor.parent_dependencies
        assert svc_db2.uuid in svc_cor.parent_dependencies
        # Get the BR associated with svc_cor
        bp_rule = svc_cor.business_rule
        assert bp_rule.operand == '&'
        assert bp_rule.of_values == ('2', '2', '2')
        # not_value remains False: one operand is negated, but the BR itself is not
        assert bp_rule.not_value == False
        assert bp_rule.is_of_mul == False
        assert bp_rule.sons is not None
        assert 2 == len(bp_rule.sons)
        # First son is linked to a service and we have its uuid
        son = bp_rule.sons[0]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db1.uuid
        # Second son is also a service
        son = bp_rule.sons[1]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        # This service is NOT valued
        assert son.not_value == True
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db2.uuid
        # Now start working on the states
        self.scheduler_loop(2, [
            [svc_db1, 0, 'OK | value1=1 value2=2'],
            [svc_db2, 2, 'CRITICAL | rtt=10']
        ])
        assert 'OK' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 'CRITICAL' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        # -----
        # OK and not CRITICAL -> OK
        # -----
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we set the db1 as soft/CRITICAL
        self.scheduler_loop(1, [[svc_db1, 2, 'CRITICAL | value1=1 value2=2']])
        assert 'CRITICAL' == svc_db1.state
        assert 'SOFT' == svc_db1.state_type
        assert 0 == svc_db1.last_hard_state_id
        # The business rule must still be 0
        # because we want HARD states
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we get db1 CRITICAL/HARD
        self.scheduler_loop(1, [[svc_db1, 2, 'CRITICAL | value1=1 value2=2']])
        assert 'CRITICAL' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 2 == svc_db1.last_hard_state_id
        # -----
        # CRITICAL and not CRITICAL -> CRITICAL
        # -----
        # The rule must go CRITICAL
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # Now we also set db2 as WARNING/HARD...
        self.scheduler_loop(2, [[svc_db2, 1, 'WARNING | value1=1 value2=2']])
        assert 'WARNING' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 1 == svc_db2.last_hard_state_id
        # -----
        # CRITICAL and not WARNING -> CRITICAL
        # -----
        # And now the state of the rule must be 2
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # And If we set db1 to WARNING too?
        self.scheduler_loop(2, [[svc_db1, 1, 'WARNING | value1=1 value2=2']])
        assert 'WARNING' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 1 == svc_db1.last_hard_state_id
        # -----
        # WARNING and not CRITICAL -> WARNING
        # -----
        # Must be WARNING (worse no 0 value for both)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 1 == state
        # Now try to get ok in both place, should be bad :)
        self.scheduler_loop(2, [[svc_db1, 0, 'OK | value1=1 value2=2'], [svc_db2, 0, 'OK | value1=1 value2=2']])
        assert 'OK' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 0 == svc_db1.last_hard_state_id
        assert 'OK' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 0 == svc_db2.last_hard_state_id
        # -----
        # OK and not OK -> CRITICAL
        # -----
        # Must be CRITICAL (ok and not ok IS no OK :) )
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # We set db2 CRITICAL then we acknowledge it
        self.scheduler_loop(2, [
            [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 2 == svc_db2.last_hard_state_id
        cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % now
        self._sched.run_external_commands([cmd])
        assert True == svc_db2.problem_has_been_acknowledged
        # -----
        # OK and not ACK(CRITICAL) -> CRITICAL
        # -----
        # Must be CRITICAL (ACK(CRITICAL) is OK)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # We unacknowledge then downtime db2
        duration = 300
        cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
        self._sched.run_external_commands([cmd])
        assert False == svc_db2.problem_has_been_acknowledged
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
        self._sched.run_external_commands([cmd])
        self.scheduler_loop(1, [[svc_cor, None, None]])
        assert svc_db2.scheduled_downtime_depth > 0
        assert True == svc_db2.in_scheduled_downtime
        # -----
        # OK and not DOWNTIME(CRITICAL) -> CRITICAL
        # -----
        # Must be CRITICAL (DOWNTIME(CRITICAL) is OK)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
def test_simple_1of_business_correlator(self):
""" BR - simple 1of: db1 OR/AND db2
bp_rule!1 of: test_host_0,db1|test_host_0,db2
"""
self.run_simple_1of_business_correlator()
def test_simple_1of_neg_business_correlator(self):
""" BR - simple -1of: db1 OR/AND db2
bp_rule!-1 of: test_host_0,db1|test_host_0,db2
"""
self.run_simple_1of_business_correlator(with_neg=True)
def test_simple_1of_pct_business_correlator(self):
""" BR - simple 50%of: db1 OR/AND db2
bp_rule!50% of: test_host_0,db1|test_host_0,db2
"""
self.run_simple_1of_business_correlator(with_pct=True)
def test_simple_1of_pct_neg_business_correlator(self):
""" BR - simple -50%of: db1 OR/AND db2
bp_rule!-50% of: test_host_0,db1|test_host_0,db2
"""
self.run_simple_1of_business_correlator(with_pct=True, with_neg=True)
    def run_simple_1of_business_correlator(self, with_pct=False, with_neg=False):
        """Shared scenario for the 'X of:' business rules over db1 / db2.
        Picks the BR service matching the requested flavour (plain / negative /
        percentage / negative percentage), checks the parsed rule structure,
        then drives db1 and db2 through state changes and asserts the rule state.
        :param with_pct: True if a percentage is set
        :param with_neg: True if a negation is set
        :return:
        """
        now = time.time()
        # Get the hosts
        host = self._sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore its parent
        router = self._sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore its parent
        # Get the services
        svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
        svc_db1.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db1.got_business_rule
        assert svc_db1.business_rule is None
        svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
        svc_db2.act_depend_of = []  # no host checks on critical check results
        # Not a BR, a simple service
        assert not svc_db2.got_business_rule
        assert svc_db2.business_rule is None
        if with_pct is True:
            if with_neg is True:
                svc_cor = self._sched.services.find_srv_by_name_and_hostname(
                    "test_host_0", "Simple_1Of_pct_neg")
            else:
                svc_cor = self._sched.services.find_srv_by_name_and_hostname(
                    "test_host_0", "Simple_1Of_pct")
        else:
            if with_neg is True:
                svc_cor = self._sched.services.find_srv_by_name_and_hostname(
                    "test_host_0", "Simple_1Of_neg")
            else:
                svc_cor = self._sched.services.find_srv_by_name_and_hostname(
                    "test_host_0", "Simple_1Of")
        svc_cor.act_depend_of = []  # no host checks on critical check results
        # Is a Business Rule, not a simple service...
        assert svc_cor.got_business_rule
        assert svc_cor.business_rule is not None
        # We check for good parent/childs links
        # So svc_cor should be a son of svc_db1 and svc_db2
        # and db1 and db2 should be parents of svc_cor
        assert svc_cor.uuid in svc_db1.child_dependencies
        assert svc_cor.uuid in svc_db2.child_dependencies
        assert svc_db1.uuid in svc_cor.parent_dependencies
        assert svc_db2.uuid in svc_cor.parent_dependencies
        # Get the BR associated with svc_cor
        bp_rule = svc_cor.business_rule
        assert bp_rule.operand == 'of:'
        # Simple 1of: so in fact a triple ('1','2','2') (1of and MAX,MAX)
        if with_pct is True:
            if with_neg is True:
                assert ('-50%', '2', '2') == bp_rule.of_values
            else:
                assert ('50%', '2', '2') == bp_rule.of_values
        else:
            if with_neg is True:
                assert ('-1', '2', '2') == bp_rule.of_values
            else:
                assert ('1', '2', '2') == bp_rule.of_values
        assert bp_rule.not_value == False
        assert bp_rule.is_of_mul == False
        assert bp_rule.sons is not None
        assert 2 == len(bp_rule.sons)
        # We've got 2 sons for the BR which are 2 dependency nodes
        # Each dependency node has a son which is the service
        assert 2 == len(bp_rule.sons)
        # First son is linked to a service and we have its uuid
        son = bp_rule.sons[0]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db1.uuid
        # Second son is also a service
        son = bp_rule.sons[1]
        assert isinstance(son, DependencyNode)
        assert son.operand == 'service'
        assert son.of_values == ('0', '0', '0')
        assert son.not_value == False
        assert son.sons is not None
        assert son.sons is not []
        assert son.sons[0] == svc_db2.uuid
        # Now start working on the states
        self.scheduler_loop(1, [
            [svc_db1, 0, 'OK | rtt=10'],
            [svc_db2, 0, 'OK | value1=1 value2=2']
        ])
        assert 'OK' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 'OK' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        # -----
        # OK 1of OK -> OK
        # -----
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we set the db1 as soft/CRITICAL
        self.scheduler_loop(1, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'SOFT' == svc_db1.state_type
        assert 0 == svc_db1.last_hard_state_id
        # The business rule must still be 0
        # because we want HARD states
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we get db1 CRITICAL/HARD
        self.scheduler_loop(1, [
            [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 2 == svc_db1.last_hard_state_id
        # -----
        # CRITICAL 1of OK -> OK
        # -----
        # The rule still be OK (one OK operand is enough for 1of)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # Now we also set db2 as CRITICAL/HARD...
        self.scheduler_loop(2, [
            [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
        ])
        assert 'CRITICAL' == svc_db2.state
        assert 'HARD' == svc_db2.state_type
        assert 2 == svc_db2.last_hard_state_id
        # -----
        # CRITICAL 1of CRITICAL -> CRITICAL
        # -----
        # And now the state of the rule must be 2
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 2 == state
        # And If we set db1 WARNING now?
        self.scheduler_loop(2, [[svc_db1, 1, 'WARNING | value1=1 value2=2']])
        assert 'WARNING' == svc_db1.state
        assert 'HARD' == svc_db1.state_type
        assert 1 == svc_db1.last_hard_state_id
        # -----
        # WARNING 1of CRITICAL -> WARNING
        # -----
        # Must be WARNING (worse no 0 value for both, like for AND rule)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 1 == state
        # We acknowledge db2
        cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % now
        self._sched.run_external_commands([cmd])
        assert True == svc_db2.problem_has_been_acknowledged
        # -----
        # WARNING 1of ACK(CRITICAL) -> OK
        # -----
        # Must be OK (ACK(CRITICAL) is OK)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
        # We unacknowledge then downtime db2
        duration = 300
        cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
        self._sched.run_external_commands([cmd])
        assert False == svc_db2.problem_has_been_acknowledged
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
        self._sched.run_external_commands([cmd])
        self.scheduler_loop(1, [[svc_cor, None, None]])
        assert svc_db2.scheduled_downtime_depth > 0
        assert True == svc_db2.in_scheduled_downtime
        # -----
        # WARNING 1of DOWNTIME(CRITICAL) -> OK
        # -----
        # Must be OK (DOWNTIME(CRITICAL) is OK)
        state = bp_rule.get_state(self._sched.hosts, self._sched.services)
        assert 0 == state
def test_simple_1of_business_correlator_with_hosts(self):
""" BR - simple 1of: test_router_0 OR/AND test_host_0"""
self.run_simple_1of_business_correlator_with_hosts()
def test_simple_1of_neg_business_correlator_with_hosts(self):
""" BR - -1of: test_router_0 OR/AND test_host_0 """
self.run_simple_1of_business_correlator_with_hosts(with_neg=True)
def test_simple_1of_pct_business_correlator_with_hosts(self):
    """ BR - simple 50%of: test_router_0 OR/AND test_host_0 """
    # Percentage flavour
    self.run_simple_1of_business_correlator_with_hosts(with_pct=True, with_neg=False)
def test_simple_1of_pct_neg_business_correlator_with_hosts(self):
    """ BR - simple -50%of: test_router_0 OR/AND test_host_0 """
    # Negated percentage flavour
    self.run_simple_1of_business_correlator_with_hosts(with_pct=True, with_neg=True)
def run_simple_1of_business_correlator_with_hosts(self, with_pct=False, with_neg=False):
    """Common body for the simple 1of host-based business rule tests.

    :param with_pct: True if a percentage is set
    :param with_neg: True if a negation is set
    :return:
    """
    # Hosts under test - their parents are ignored
    host = self._sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = []  # ignore its parent
    router = self._sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = []  # ignore its parent

    # Pick the correlator service matching the requested flavour
    flavour = (with_pct is True, with_neg is True)
    service_names = {
        (False, False): "Simple_1Of_with_host",
        (False, True): "Simple_1Of_with_host_neg",
        (True, False): "Simple_1Of_with_host_pct",
        (True, True): "Simple_1Of_with_host_pct_neg",
    }
    svc_cor = self._sched.services.find_srv_by_name_and_hostname(
        "test_host_0", service_names[flavour])
    svc_cor.act_depend_of = []  # no host checks on critical check results

    # Is a Business Rule, not a simple service...
    assert svc_cor.got_business_rule
    assert svc_cor.business_rule is not None

    # Get the BR associated with svc_cor
    bp_rule = svc_cor.business_rule
    assert bp_rule.operand == 'of:'

    # Simple 1of: so in fact a triple ('1','2','2') (1of and MAX,MAX)
    expected_of_values = {
        (False, False): ('1', '2', '2'),
        (False, True): ('-1', '2', '2'),
        (True, False): ('50%', '2', '2'),
        (True, True): ('-50%', '2', '2'),
    }
    assert expected_of_values[flavour] == bp_rule.of_values

    sons = bp_rule.sons
    print("Sons,", sons)
    # We've got 2 sons, one host node per monitored host
    assert 2 == len(sons)
    assert 'host' == sons[0].operand
    assert host.uuid == sons[0].sons[0]
    assert 'host' == sons[1].operand
    assert router.uuid == sons[1].sons[0]
def test_dep_node_list_elements(self):
    """ BR - list all elements

    :return:
    """
    # db1 and db2 are plain services, not business rules
    svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
    svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
    for plain_service in (svc_db1, svc_db2):
        assert False == plain_service.got_business_rule
        assert None is plain_service.business_rule

    # The correlator service carries an OR business rule
    svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
    svc_cor.act_depend_of = []  # no host checks on critical check results
    assert True == svc_cor.got_business_rule
    assert svc_cor.business_rule is not None
    bp_rule = svc_cor.business_rule
    assert '|' == bp_rule.operand

    # The rule must list exactly the two underlying services
    all_elements = bp_rule.list_all_elements()
    print("All elements", all_elements)
    assert 2 == len(all_elements)
    assert svc_db2.uuid in all_elements
    assert svc_db1.uuid in all_elements
def test_full_erp_rule_with_schedule(self):
    """ Full ERP rule with real checks scheduled

    bp_rule!(test_host_0,db1|test_host_0,db2) & (test_host_0,web1|test_host_0,web2)
    & (test_host_0,lvs1|test_host_0,lvs2)

    Checks the parsed rule tree structure, then drives the member services
    through state changes and verifies the aggregated BR state, including
    acknowledgement and downtime handling.

    :return:
    """
    now = time.time()

    # Get the hosts
    host = self._sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = []  # ignore the router
    router = self._sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = []  # ignore the router

    # Get the services
    svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
    svc_db1.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_db1.got_business_rule
    assert svc_db1.business_rule is None

    svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
    svc_db2.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_db2.got_business_rule
    assert svc_db2.business_rule is None

    svc_web1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "web1")
    svc_web1.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_web1.got_business_rule
    assert svc_web1.business_rule is None

    svc_web2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "web2")
    svc_web2.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_web2.got_business_rule
    assert svc_web2.business_rule is None

    svc_lvs1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs1")
    svc_lvs1.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_lvs1.got_business_rule
    assert svc_lvs1.business_rule is None

    svc_lvs2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs2")
    svc_lvs2.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_lvs2.got_business_rule
    assert svc_lvs2.business_rule is None

    svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "ERP")
    svc_cor.act_depend_of = []  # no host checks on critical check results
    assert True == svc_cor.got_business_rule
    assert svc_cor.business_rule is not None
    bp_rule = svc_cor.business_rule
    assert '&' == bp_rule.operand

    # We check for good parent/childs links
    # So svc_cor should be a son of svc_db1, svc_db2, ...
    # and they should be parents of svc_cor
    assert svc_cor.uuid in svc_db1.child_dependencies
    assert svc_cor.uuid in svc_db2.child_dependencies
    assert svc_cor.uuid in svc_web1.child_dependencies
    assert svc_cor.uuid in svc_web2.child_dependencies
    assert svc_cor.uuid in svc_lvs1.child_dependencies
    assert svc_cor.uuid in svc_lvs2.child_dependencies

    assert svc_db1.uuid in svc_cor.parent_dependencies
    assert svc_db2.uuid in svc_cor.parent_dependencies
    assert svc_web1.uuid in svc_cor.parent_dependencies
    assert svc_web2.uuid in svc_cor.parent_dependencies
    assert svc_lvs1.uuid in svc_cor.parent_dependencies
    assert svc_lvs2.uuid in svc_cor.parent_dependencies

    # Get the BR associated with svc_cor
    bp_rule = svc_cor.business_rule
    assert bp_rule.operand == '&'
    assert bp_rule.of_values == ('3', '3', '3')
    assert bp_rule.not_value == False
    assert bp_rule.is_of_mul == False
    assert bp_rule.sons is not None
    assert 3 == len(bp_rule.sons)

    # First son is an OR rule for the DB node
    db_node = bp_rule.sons[0]
    assert isinstance(db_node, DependencyNode)
    assert db_node.operand == '|'
    assert db_node.of_values == ('2', '2', '2')
    assert db_node.not_value == False
    assert db_node.sons is not None
    # Note: `!= []` (the former `is not []` identity test was always true)
    assert db_node.sons != []
    assert 2 == len(db_node.sons)

    # First son of DB node is linked to a service and we have its uuid
    son = db_node.sons[0]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_db1.uuid

    # Second son of DB node is also a service
    son = db_node.sons[1]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_db2.uuid

    # Second son is an OR rule for the Web node
    web_node = bp_rule.sons[1]
    assert isinstance(web_node, DependencyNode)
    assert web_node.operand == '|'
    assert web_node.of_values == ('2', '2', '2')
    assert web_node.not_value == False
    assert web_node.sons is not None
    assert web_node.sons != []
    assert 2 == len(web_node.sons)

    # First son of Web node is linked to a service and we have its uuid
    son = web_node.sons[0]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_web1.uuid

    # Second son of Web node is also a service
    son = web_node.sons[1]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_web2.uuid

    # Third son is an OR rule for the LVS node
    lvs_node = bp_rule.sons[2]
    assert isinstance(lvs_node, DependencyNode)
    assert lvs_node.operand == '|'
    assert lvs_node.of_values == ('2', '2', '2')
    assert lvs_node.not_value == False
    assert lvs_node.sons is not None
    assert lvs_node.sons != []
    assert 2 == len(lvs_node.sons)

    # First son of LVS node is linked to a service and we have its uuid
    son = lvs_node.sons[0]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_lvs1.uuid

    # Second son of LVS node is also a service
    son = lvs_node.sons[1]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_lvs2.uuid

    # Now start working on the states
    self.scheduler_loop(1, [
        [svc_db1, 0, 'OK'],
        [svc_db2, 0, 'OK'],
        [svc_web1, 0, 'OK'],
        [svc_web2, 0, 'OK'],
        [svc_lvs1, 0, 'OK'],
        [svc_lvs2, 0, 'OK'],
    ])
    assert 'OK' == svc_db1.state
    assert 'HARD' == svc_db1.state_type
    assert 'OK' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 'OK' == svc_web1.state
    assert 'HARD' == svc_web1.state_type
    assert 'OK' == svc_web2.state
    assert 'HARD' == svc_web2.state_type
    assert 'OK' == svc_lvs1.state
    assert 'HARD' == svc_lvs1.state_type
    assert 'OK' == svc_lvs2.state
    assert 'HARD' == svc_lvs2.state_type

    # -----
    # (OK or OK) and (OK or OK) and (OK or OK) -> OK
    # -----
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # Launch an internal check
    self.launch_internal_check(svc_cor)
    # What is the svc_cor state now?
    assert 'OK' == svc_cor.state
    assert 'HARD' == svc_cor.state_type
    assert 0 == svc_cor.last_hard_state_id

    # Now we get db1 CRITICAL/HARD
    self.scheduler_loop(2, [
        [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
    ])
    assert 'CRITICAL' == svc_db1.state
    assert 'HARD' == svc_db1.state_type
    assert 2 == svc_db1.last_hard_state_id

    # -----
    # (CRITICAL or OK) and (OK or OK) and (OK or OK) -> OK
    # 1st OK because OK or CRITICAL -> OK
    # -----
    # The rule must still be a 0 (or inside)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # Launch an internal check
    self.launch_internal_check(svc_cor)
    print("ERP: Look at svc_cor state", svc_cor.state)
    # What is the svc_cor state now?
    assert 'OK' == svc_cor.state
    assert 'HARD' == svc_cor.state_type
    assert 0 == svc_cor.last_hard_state_id

    # Now we also set db2 as CRITICAL/HARD... byebye 0 :)
    self.scheduler_loop(2, [
        [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
    ])
    assert 'CRITICAL' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 2 == svc_db2.last_hard_state_id

    # -----
    # (CRITICAL or CRITICAL) and (OK or OK) and (OK or OK) -> CRITICAL
    # 1st CRITICAL because CRITICAL or CRITICAL -> CRITICAL
    # -----
    # And now the state of the rule must be 2
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 2 == state

    # Launch an internal check
    self.launch_internal_check(svc_cor)
    # What is the svc_cor state now?
    # And now we must be CRITICAL/SOFT
    assert 'CRITICAL' == svc_cor.state
    assert 'SOFT' == svc_cor.state_type
    assert 0 == svc_cor.last_hard_state_id

    # Launch an internal check
    self.launch_internal_check(svc_cor)
    # What is the svc_cor state now?
    # And now we must be CRITICAL/HARD
    assert 'CRITICAL' == svc_cor.state
    assert 'HARD' == svc_cor.state_type
    assert 2 == svc_cor.last_hard_state_id

    # And If we set db2 to WARNING?
    self.scheduler_loop(2, [
        [svc_db2, 1, 'WARNING | value1=1 value2=2']
    ])
    assert 'WARNING' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 1 == svc_db2.last_hard_state_id

    # -----
    # (CRITICAL or WARNING) and (OK or OK) and (OK or OK) -> WARNING
    # 1st WARNING because CRITICAL or WARNING -> WARNING
    # -----
    # Must be WARNING (better no 0 value)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 1 == state

    # And in a HARD
    # Launch an internal check
    self.launch_internal_check(svc_cor)
    # What is the svc_cor state now?
    assert 'WARNING' == svc_cor.state
    assert 'HARD' == svc_cor.state_type
    assert 1 == svc_cor.last_hard_state_id

    # Assert that ERP Is an impact of the problem db2
    assert svc_cor.uuid in svc_db2.impacts
    # and db1 too
    assert svc_cor.uuid in svc_db1.impacts

    # And now all is green :)
    self.scheduler_loop(2, [
        [svc_db1, 0, 'OK'],
        [svc_db2, 0, 'OK'],
    ])

    # Launch an internal check
    self.launch_internal_check(svc_cor)
    # What is the svc_cor state now?
    assert 'OK' == svc_cor.state
    assert 'HARD' == svc_cor.state_type
    assert 0 == svc_cor.last_hard_state_id

    # And no more in impact
    # Fix: impacts holds uuids; comparing the object itself was vacuously true
    assert svc_cor.uuid not in svc_db2.impacts
    assert svc_cor.uuid not in svc_db1.impacts

    # And what if we set 2 service from distant rule CRITICAL?
    # ERP should be still OK
    self.scheduler_loop(2, [
        [svc_db1, 2, 'CRITICAL | value1=1 value2=2'],
        [svc_web1, 2, 'CRITICAL | value1=1 value2=2'],
        [svc_lvs1, 2, 'CRITICAL | value1=1 value2=2']
    ])

    # Launch an internal check
    self.launch_internal_check(svc_cor)

    # -----
    # (CRITICAL or OK) and (CRITICAL or OK) and (CRITICAL or OK) -> OK
    # All OK because CRITICAL or OK -> OK
    # -----
    # What is the svc_cor state now?
    assert 'OK' == svc_cor.state
    assert 'HARD' == svc_cor.state_type
    assert 0 == svc_cor.last_hard_state_id

    # We set bd 2 to CRITICAL and acknowledge it
    self.scheduler_loop(2, [
        [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
    ])
    assert 'CRITICAL' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 2 == svc_db2.last_hard_state_id

    cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % now
    self._sched.run_external_commands([cmd])
    assert True == svc_db2.problem_has_been_acknowledged

    # -----
    # (CRITICAL or ACK(CRITICAL)) and (OK or OK) and (OK or OK) -> OK
    # All OK because CRITICAL or ACK(CRITICAL) -> OK
    # -----
    # Must be OK
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # We unacknowledge then downtime db2
    duration = 300
    cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
    self._sched.run_external_commands([cmd])
    assert False == svc_db2.problem_has_been_acknowledged

    cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
    self._sched.run_external_commands([cmd])
    self.scheduler_loop(1, [[svc_cor, None, None]])
    assert svc_db2.scheduled_downtime_depth > 0
    assert True == svc_db2.in_scheduled_downtime

    # -----
    # (CRITICAL or DOWNTIME(CRITICAL)) and (OK or OK) and (OK or OK) -> OK
    # All OK because CRITICAL or DOWNTIME(CRITICAL) -> OK
    # -----
    # Must be OK
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state
def test_complex_ABCof_business_correlator(self):
    """ BR - complex -bp_rule!5,1,1 of: test_host_0,A|test_host_0,B|test_host_0,C|
    test_host_0,D|test_host_0,E """
    # Absolute-count flavour of the 5,1,1 of: rule
    self.run_complex_ABCof_business_correlator(with_pct=False)
def test_complex_ABCof_pct_business_correlator(self):
    """ BR - complex bp_rule!100%,20%,20% of: test_host_0,A|test_host_0,B|test_host_0,C|
    test_host_0,D|test_host_0,E """
    # Percentage flavour of the of: rule
    self.run_complex_ABCof_business_correlator(with_pct=True)
def run_complex_ABCof_business_correlator(self, with_pct=False):
    """Common body for the complex ABC of: business rule tests.

    Verifies the parsed 5-son rule tree, then drives services A..E
    through state changes while mutating ``of_values`` thresholds and
    checking the resulting aggregated rule state, including
    acknowledgement and downtime handling of member B.

    :param with_pct: True if a percentage is set
    :return:
    """
    now = time.time()

    # Get the hosts
    host = self._sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = []  # ignore its parent
    router = self._sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = []  # ignore its parent

    # Get the services - A..E are plain services, not business rules
    A = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "A")
    assert False == A.got_business_rule
    assert None is A.business_rule
    A.act_depend_of = []
    B = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "B")
    assert False == B.got_business_rule
    assert None is B.business_rule
    B.act_depend_of = []
    C = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "C")
    assert False == C.got_business_rule
    assert None is C.business_rule
    C.act_depend_of = []
    D = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "D")
    assert False == D.got_business_rule
    assert None is D.business_rule
    D.act_depend_of = []
    E = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "E")
    assert False == E.got_business_rule
    assert None is E.business_rule
    E.act_depend_of = []

    if with_pct == False:
        svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0",
                                                                     "Complex_ABCOf")
    else:
        svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0",
                                                                     "Complex_ABCOf_pct")
    svc_cor.act_depend_of = []  # no host checks on critical check results
    # Is a Business Rule, not a simple service...
    assert svc_cor.got_business_rule
    assert svc_cor.business_rule is not None

    # Get the BR associated with svc_cor
    bp_rule = svc_cor.business_rule
    assert bp_rule.operand == 'of:'
    if with_pct == False:
        assert ('5', '1', '1') == bp_rule.of_values
    else:
        assert ('100%', '20%', '20%') == bp_rule.of_values
    assert bp_rule.is_of_mul == True
    assert bp_rule.sons is not None
    assert 5 == len(bp_rule.sons)

    # We've got 5 sons for the BR which are 5 dependency nodes
    # Each dependency node has a son which is the service
    sons = bp_rule.sons
    assert 'service' == sons[0].operand
    assert A.uuid == sons[0].sons[0]
    assert 'service' == sons[1].operand
    assert B.uuid == sons[1].sons[0]
    assert 'service' == sons[2].operand
    assert C.uuid == sons[2].sons[0]
    assert 'service' == sons[3].operand
    assert D.uuid == sons[3].sons[0]
    assert 'service' == sons[4].operand
    assert E.uuid == sons[4].sons[0]

    # Now start working on the states
    self.scheduler_loop(1, [
        [A, 0, 'OK'], [B, 0, 'OK'], [C, 0, 'OK'], [D, 0, 'OK'], [E, 0, 'OK']
    ])
    assert 'OK' == A.state
    assert 'HARD' == A.state_type
    assert 'OK' == B.state
    assert 'HARD' == B.state_type
    assert 'OK' == C.state
    assert 'HARD' == C.state_type
    assert 'OK' == D.state
    assert 'HARD' == D.state_type
    assert 'OK' == E.state
    assert 'HARD' == E.state_type

    # -----
    # All OK with a 5,1,1 of: -> OK
    # -----
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # Now we set the A as CRITICAL/HARD
    self.scheduler_loop(2, [[A, 2, 'CRITICAL']])
    assert 'CRITICAL' == A.state
    assert 'HARD' == A.state_type
    assert 2 == A.last_hard_state_id

    # -----
    # All OK except 1 with 5,1,1 of: -> CRITICAL
    # -----
    # The rule is 2
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 2 == state

    # Now we also set B as CRITICAL/HARD...
    self.scheduler_loop(2, [[B, 2, 'CRITICAL']])
    assert 'CRITICAL' == B.state
    assert 'HARD' == B.state_type
    assert 2 == B.last_hard_state_id

    # -----
    # All OK except 2 with 5,1,1 of: -> CRITICAL
    # -----
    # The state of the rule remains 2
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 2 == state

    # And If we set A and B WARNING now?
    self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 1, 'WARNING']])
    assert 'WARNING' == A.state
    assert 'HARD' == A.state_type
    assert 1 == A.last_hard_state_id
    assert 'WARNING' == B.state
    assert 'HARD' == B.state_type
    assert 1 == B.last_hard_state_id

    # -----
    # All OK except 2 WARNING with 5,1,1 of: -> WARNING
    # -----
    # Must be WARNING (worse no 0 value for both, like for AND rule)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    print("state", state)
    assert 1 == state

    # Ok now more fun, with changing of_values and states
    ### W O O O O
    # 4 of: -> Ok (we got 4 OK, and not 4 warn or crit, so it's OK)
    # 5,1,1 -> Warning (at least one warning, and no crit -> warning)
    # 5,2,1 -> OK (we want warning only if we got 2 bad states, so not here)
    # Set one as WARNING and all others as OK
    self.scheduler_loop(1, [
        [A, 1, 'WARNING'], [B, 0, 'OK'], [C, 0, 'OK'], [D, 0, 'OK'], [E, 0, 'OK']
    ])
    # 4 of: -> 4,5,5
    if with_pct == False:
        bp_rule.of_values = ('4', '5', '5')
    else:
        bp_rule.of_values = ('80%', '100%', '100%')
    bp_rule.is_of_mul = False
    # -----
    # All OK except 1 with 4of: -> OK
    # -----
    assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # 5,1,1
    if with_pct == False:
        bp_rule.of_values = ('5', '1', '1')
    else:
        bp_rule.of_values = ('100%', '20%', '20%')
    bp_rule.is_of_mul = True
    assert 1 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # 5,2,1
    if with_pct == False:
        bp_rule.of_values = ('5', '2', '1')
    else:
        bp_rule.of_values = ('100%', '40%', '20%')
    bp_rule.is_of_mul = True
    assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)

    ###* W C O O O
    # 4 of: -> Critical (not 4 ok, so we take the worse state, the critical)
    # 4,1,1 -> Critical (2 states raise the warning, but one raises critical, so worse state is critical)
    self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 2, 'Crit']])
    # 4 of: -> 4,5,5
    if with_pct == False:
        bp_rule.of_values = ('4', '5', '5')
    else:
        bp_rule.of_values = ('80%', '100%', '100%')
    bp_rule.is_of_mul = False
    assert 2 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # 4,1,1
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '1')
    else:
        bp_rule.of_values = ('40%', '20%', '20%')
    bp_rule.is_of_mul = True
    assert 2 == bp_rule.get_state(self._sched.hosts, self._sched.services)

    ##* W C C O O
    # * 2 of: OK
    # * 4,1,1 -> Critical (same as before)
    # * 4,1,3 -> warning (the warning rule is raised, but the critical is not)
    self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 2, 'Crit'], [C, 2, 'Crit']])
    # * 2 of: 2,5,5
    if with_pct == False:
        bp_rule.of_values = ('2', '5', '5')
    else:
        bp_rule.of_values = ('40%', '100%', '100%')
    bp_rule.is_of_mul = False
    assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # * 4,1,1
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '1')
    else:
        bp_rule.of_values = ('80%', '20%', '20%')
    bp_rule.is_of_mul = True
    assert 2 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # * 4,1,3
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '3')
    else:
        bp_rule.of_values = ('80%', '20%', '60%')
    bp_rule.is_of_mul = True
    assert 1 == bp_rule.get_state(self._sched.hosts, self._sched.services)

    ##* W ACK(C) C O O
    # * 3 of: OK
    # * 4,1,1 -> Critical (same as before)
    # * 4,1,2 -> Warning
    cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;B;2;1;1;lausser;blablub" % (now)
    self._sched.run_external_commands([cmd])
    if with_pct == False:
        bp_rule.of_values = ('3', '5', '5')
    else:
        bp_rule.of_values = ('60%', '100%', '100%')
    bp_rule.is_of_mul = False
    assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # * 4,1,1
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '1')
    else:
        bp_rule.of_values = ('80%', '20%', '20%')
    bp_rule.is_of_mul = True
    assert 2 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # * 4,1,2 (comment previously said 4,1,3 - the values below are 4,1,2)
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '2')
    else:
        bp_rule.of_values = ('80%', '20%', '40%')
    bp_rule.is_of_mul = True
    assert 1 == bp_rule.get_state(self._sched.hosts, self._sched.services)

    ##* W DOWNTIME(C) C O O
    # * 3 of: OK
    # * 4,1,1 -> Critical (same as before)
    # * 4,1,2 -> Warning
    duration = 300
    cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;B" % now
    self._sched.run_external_commands([cmd])
    cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;B;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
    self._sched.run_external_commands([cmd])
    self.scheduler_loop(1, [[svc_cor, None, None]])
    if with_pct == False:
        bp_rule.of_values = ('3', '5', '5')
    else:
        bp_rule.of_values = ('60%', '100%', '100%')
    bp_rule.is_of_mul = False
    assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # * 4,1,1
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '1')
    else:
        bp_rule.of_values = ('80%', '20%', '20%')
    bp_rule.is_of_mul = True
    assert 2 == bp_rule.get_state(self._sched.hosts, self._sched.services)
    # * 4,1,2 (comment previously said 4,1,3 - the values below are 4,1,2)
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '2')
    else:
        bp_rule.of_values = ('80%', '20%', '40%')
    bp_rule.is_of_mul = True
    assert 1 == bp_rule.get_state(self._sched.hosts, self._sched.services)
# We will try a multi-levels rule mixing services and a host
def test_multi_layers(self):
    """ BR - multi-levels rule

    bp_rule!(test_host_0,db1| (test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2) ) )
    & test_router_0

    Checks the parsed nested rule tree, then drives the member services
    and the router through state changes and verifies the aggregated
    rule state, including acknowledgement and downtime handling.

    :return:
    """
    now = time.time()

    # Get the hosts
    host = self._sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = []  # ignore its parent
    router = self._sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = []  # ignore its parent

    # Get the services
    svc_db1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
    svc_db1.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_db1.got_business_rule
    assert svc_db1.business_rule is None

    svc_db2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
    svc_db2.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_db2.got_business_rule
    assert svc_db2.business_rule is None

    svc_lvs1 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs1")
    svc_lvs1.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_lvs1.got_business_rule
    assert svc_lvs1.business_rule is None

    svc_lvs2 = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs2")
    svc_lvs2.act_depend_of = []  # no host checks on critical check results
    # Not a BR, a simple service
    assert not svc_lvs2.got_business_rule
    assert svc_lvs2.business_rule is None

    svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "Multi_levels")
    svc_cor.act_depend_of = []  # no host checks on critical check results
    assert True == svc_cor.got_business_rule
    assert svc_cor.business_rule is not None
    bp_rule = svc_cor.business_rule
    assert '&' == bp_rule.operand

    # We check for good parent/childs links
    # So svc_cor should be a son of svc_db1, svc_db2, ...
    # and they should be parents of svc_cor
    assert svc_cor.uuid in svc_db1.child_dependencies
    assert svc_cor.uuid in svc_db2.child_dependencies
    assert svc_cor.uuid in svc_lvs1.child_dependencies
    assert svc_cor.uuid in svc_lvs2.child_dependencies

    assert svc_db1.uuid in svc_cor.parent_dependencies
    assert svc_db2.uuid in svc_cor.parent_dependencies
    assert svc_lvs1.uuid in svc_cor.parent_dependencies
    assert svc_lvs2.uuid in svc_cor.parent_dependencies

    # Get the BR associated with svc_cor
    bp_rule = svc_cor.business_rule
    assert bp_rule.operand == '&'
    assert bp_rule.of_values == ('2', '2', '2')
    assert bp_rule.not_value == False
    assert bp_rule.is_of_mul == False
    assert bp_rule.sons is not None
    assert 2 == len(bp_rule.sons)

    # First son is an OR rule
    first_node = bp_rule.sons[0]
    assert isinstance(first_node, DependencyNode)
    assert first_node.operand == '|'
    assert first_node.of_values == ('2', '2', '2')
    assert first_node.not_value == False
    assert first_node.sons is not None
    # Note: `!= []` (the former `is not []` identity test was always true)
    assert first_node.sons != []
    assert 2 == len(first_node.sons)

    # First son of the node is linked to a service and we have its uuid
    son = first_node.sons[0]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_db1.uuid

    # Second son of the node is also a rule (AND)
    son = first_node.sons[1]
    assert isinstance(son, DependencyNode)
    assert son.operand == '&'
    assert son.of_values == ('2', '2', '2')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert isinstance(son.sons[0], DependencyNode)

    # Second node is a rule
    second_node = son
    assert isinstance(second_node, DependencyNode)
    assert second_node.operand == '&'
    assert second_node.of_values == ('2', '2', '2')
    assert second_node.not_value == False
    assert second_node.sons is not None
    assert second_node.sons != []
    assert isinstance(son.sons[0], DependencyNode)

    # First son of the node is linked to a service and we have its uuid
    son = second_node.sons[0]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_db2.uuid

    # Second son of the node is also a rule (OR)
    son = second_node.sons[1]
    assert isinstance(son, DependencyNode)
    assert son.operand == '|'
    assert son.of_values == ('2', '2', '2')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert isinstance(son.sons[0], DependencyNode)

    # Third node is a rule
    third_node = son
    assert isinstance(third_node, DependencyNode)
    assert third_node.operand == '|'
    assert third_node.of_values == ('2', '2', '2')
    assert third_node.not_value == False
    assert third_node.sons is not None
    assert third_node.sons != []
    assert isinstance(son.sons[0], DependencyNode)

    # First son of the node is linked to a service and we have its uuid
    son = third_node.sons[0]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_lvs1.uuid

    # Second son of the node is also a service
    son = third_node.sons[1]
    assert isinstance(son, DependencyNode)
    assert son.operand == 'service'
    assert son.of_values == ('0', '0', '0')
    assert son.not_value == False
    assert son.sons is not None
    assert son.sons != []
    assert son.sons[0] == svc_lvs2.uuid

    # Now start working on the states
    self.scheduler_loop(1, [
        [svc_db1, 0, 'OK | rtt=10'],
        [svc_db2, 0, 'OK | value1=1 value2=2'],
        [svc_lvs1, 0, 'OK'],
        [svc_lvs2, 0, 'OK'],
        [host, 0, 'UP'],
        [router, 0, 'UP']
    ])
    assert 'OK' == svc_db1.state
    assert 'HARD' == svc_db1.state_type
    assert 'OK' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 'OK' == svc_lvs1.state
    assert 'HARD' == svc_lvs1.state_type
    assert 'OK' == svc_lvs2.state
    assert 'HARD' == svc_lvs2.state_type

    # All is green, the rule should be green too
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # Now we get db1 CRITICAL/HARD
    self.scheduler_loop(2, [
        [svc_db1, 2, 'CRITICAL | value1=1 value2=2']
    ])
    assert 'CRITICAL' == svc_db1.state
    assert 'HARD' == svc_db1.state_type
    assert 2 == svc_db1.last_hard_state_id

    # The rule must still be a 0 (OR inside)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # Now we also set db2 as CRITICAL/HARD...
    self.scheduler_loop(2, [
        [svc_db2, 2, 'CRITICAL | value1=1 value2=2']
    ])
    assert 'CRITICAL' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 2 == svc_db2.last_hard_state_id

    # And now the state of the rule must be 2
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 2 == state

    # And If we set db2 to WARNING?
    self.scheduler_loop(2, [
        [svc_db2, 1, 'WARNING | value1=1 value2=2']
    ])
    assert 'WARNING' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 1 == svc_db2.last_hard_state_id

    # Must be WARNING (better no 0 value)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 1 == state

    # Acknowledge db2
    cmd = "[%lu] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;db2;2;1;1;lausser;blablub" % (now)
    self._sched.run_external_commands([cmd])
    assert True == svc_db2.problem_has_been_acknowledged

    # Must be OK
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # Unacknowledge then downtime db2
    duration = 300
    cmd = "[%lu] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;db2" % now
    self._sched.run_external_commands([cmd])
    assert False == svc_db2.problem_has_been_acknowledged

    cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;db2;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
    self._sched.run_external_commands([cmd])
    self.scheduler_loop(1, [[svc_cor, None, None]])
    assert svc_db2.scheduled_downtime_depth > 0
    assert True == svc_db2.in_scheduled_downtime
    assert 'WARNING' == svc_db2.state
    assert 'HARD' == svc_db2.state_type
    assert 1 == svc_db2.last_hard_state_id

    # Must be OK
    # Fix: plain assert for consistency with the rest of the file
    # (was self.assertEqual(0, state))
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # We should got now svc_db2 and svc_db1 as root problems
    assert svc_db1.uuid in svc_cor.source_problems
    assert svc_db2.uuid in svc_cor.source_problems

    # What about now with the router in DOWN state?
    self.scheduler_loop(5, [[router, 2, 'DOWN']])
    assert 'DOWN' == router.state
    assert 'HARD' == router.state_type
    assert 1 == router.last_hard_state_id

    # Must be CRITICAL (CRITICAL VERSUS DOWN -> DOWN)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 2 == state

    # Now our root problem is router
    assert router.uuid in svc_cor.source_problems
# We will try a strange rule where UP&UP -> OK and DOWN&DOWN -> OK
def test_darthelmet_rule(self):
    """Business rule with the custom '|' (darthelmet) operand.

    Checks the 'strange' state table: A UP & B UP -> OK, A DOWN alone
    -> CRITICAL, A DOWN & B DOWN -> OK again; then verifies that an
    acknowledge and a downtime on A both neutralize the problem.
    """
    print("Get the hosts and services")
    now = time.time()
    # The host carrying the business rule and its two member hosts
    host = self._sched.hosts.find_by_name("test_darthelmet")
    host.checks_in_progress = []
    host.act_depend_of = []  # ignore the router
    A = self._sched.hosts.find_by_name("test_darthelmet_A")
    B = self._sched.hosts.find_by_name("test_darthelmet_B")

    assert True == host.got_business_rule
    assert host.business_rule is not None
    bp_rule = host.business_rule
    assert '|' == bp_rule.operand

    # Now state working on the states: everything UP -> rule is OK (0)
    self.scheduler_loop(3, [[host, 0, 'UP'], [A, 0, 'UP'], [B, 0, 'UP'] ] )
    assert 'UP' == host.state
    assert 'HARD' == host.state_type
    assert 'UP' == A.state
    assert 'HARD' == A.state_type

    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    print("WTF0", state)
    assert 0 == state

    # Now we set the A as soft/DOWN: a SOFT state must not change the rule
    self.scheduler_loop(1, [[A, 2, 'DOWN']])
    assert 'DOWN' == A.state
    assert 'SOFT' == A.state_type
    assert 0 == A.last_hard_state_id

    # The business rule must still be 0
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # Now we get A DOWN/HARD
    self.scheduler_loop(3, [[A, 2, 'DOWN']])
    assert 'DOWN' == A.state
    assert 'HARD' == A.state_type
    assert 1 == A.last_hard_state_id

    # The rule must still be a 2 (or inside)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    print("WFT", state)
    assert 2 == state

    # Now we also set B as DOWN/HARD, should get back to 0!
    self.scheduler_loop(3, [[B, 2, 'DOWN']])
    assert 'DOWN' == B.state
    assert 'HARD' == B.state_type
    assert 1 == B.last_hard_state_id

    # And now the state of the rule must be 0 again! (strange rule isn't it?)
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # We set B as UP and acknowledge A: the acknowledge masks A's problem
    self.scheduler_loop(3, [[B, 0, 'UP']])
    assert 'UP' == B.state
    assert 'HARD' == B.state_type
    assert 0 == B.last_hard_state_id

    cmd = "[%lu] ACKNOWLEDGE_HOST_PROBLEM;test_darthelmet_A;1;1;0;lausser;blablub" % now
    self._sched.run_external_commands([cmd])
    assert 'DOWN' == A.state
    assert 'HARD' == A.state_type
    assert 1 == A.last_hard_state_id

    # Rule sees the acknowledged A as OK
    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state

    # We unacknowledge then downtime A: the downtime masks the problem too
    duration = 300
    cmd = "[%lu] REMOVE_HOST_ACKNOWLEDGEMENT;test_darthelmet_A" % now
    self._sched.run_external_commands([cmd])

    cmd = "[%lu] SCHEDULE_HOST_DOWNTIME;test_darthelmet_A;%d;%d;1;0;%d;lausser;blablub" % (now, now, now + duration, duration)
    self._sched.run_external_commands([cmd])
    self.scheduler_loop(1, [[B, None, None]])
    assert 'DOWN' == A.state
    assert 'HARD' == A.state_type
    assert 1 == A.last_hard_state_id

    state = bp_rule.get_state(self._sched.hosts, self._sched.services)
    assert 0 == state
|
class TestBusinessCorrelator(AlignakTest):
    # NOTE(review): every method body below is a bare ``pass`` — this looks
    # like an auto-generated interface skeleton of the business-correlator
    # test class, not runnable tests; confirm it is not imported alongside
    # the real implementation (it would shadow the class of the same name).

    def setUp(self):
        pass

    def launch_internal_check(self, svc_br):
        ''' Launch an internal check for the business rule service provided '''
        pass

    def test_br_creation(self):
        ''' BR - check creation of a simple services OR (db1 OR db2)

        :return:
        '''
        pass

    def test_simple_or_business_correlator(self):
        ''' BR - try a simple services OR (db1 OR db2)

        bp_rule!test_host_0,db1|test_host_0,db2

        :return:
        '''
        pass

    def test_simple_or_business_correlator_with_schedule(self):
        ''' BR - try a simple services OR (db1 OR db2) with internal checks

        bp_rule!test_host_0,db1|test_host_0,db2

        :return:
        '''
        pass

    def test_simple_or_not_business_correlator(self):
        ''' BR - try a simple services OR (db1 OR NOT db2)

        bp_rule!test_host_0,db1|!test_host_0,db2

        :return:
        '''
        pass

    def test_simple_and_business_correlator(self):
        ''' BR - try a simple services AND (db1 AND db2)

        bp_rule!test_host_0,db1&test_host_0,db2

        :return:
        '''
        pass

    def test_simple_and_not_business_correlator(self):
        ''' BR - try a simple services AND NOT (db1 AND NOT db2)

        bp_rule!test_host_0,db1&!test_host_0,db2
        '''
        pass

    def test_simple_1of_business_correlator(self):
        ''' BR - simple 1of: db1 OR/AND db2

        bp_rule!1 of: test_host_0,db1|test_host_0,db2
        '''
        pass

    def test_simple_1of_neg_business_correlator(self):
        ''' BR - simple -1of: db1 OR/AND db2

        bp_rule!-1 of: test_host_0,db1|test_host_0,db2
        '''
        pass

    def test_simple_1of_pct_business_correlator(self):
        ''' BR - simple 50%of: db1 OR/AND db2

        bp_rule!50% of: test_host_0,db1|test_host_0,db2
        '''
        pass

    def test_simple_1of_pct_neg_business_correlator(self):
        ''' BR - simple -50%of: db1 OR/AND db2

        bp_rule!-50% of: test_host_0,db1|test_host_0,db2
        '''
        pass

    def run_simple_1of_business_correlator(self, with_pct=False, with_neg=False):
        '''

        :param with_pct: True if a percentage is set
        :param with_neg: True if a negation is set
        :return:
        '''
        pass

    def test_simple_1of_business_correlator_with_hosts(self):
        ''' BR - simple 1of: test_router_0 OR/AND test_host_0'''
        pass

    def test_simple_1of_neg_business_correlator_with_hosts(self):
        ''' BR - -1of: test_router_0 OR/AND test_host_0 '''
        pass

    def test_simple_1of_pct_business_correlator_with_hosts(self):
        ''' BR - simple 50%of: test_router_0 OR/AND test_host_0 '''
        pass

    def test_simple_1of_pct_neg_business_correlator_with_hosts(self):
        ''' BR - simple -50%of: test_router_0 OR/AND test_host_0 '''
        pass

    def run_simple_1of_business_correlator_with_hosts(self, with_pct=False, with_neg=False):
        '''

        :param with_pct: True if a percentage is set
        :param with_neg: True if a negation is set
        :return:
        '''
        pass

    def test_dep_node_list_elements(self):
        ''' BR - list all elements

        :return:
        '''
        pass

    def test_full_erp_rule_with_schedule(self):
        ''' Full ERP rule with real checks scheduled

        bp_rule!(test_host_0,db1|test_host_0,db2) & (test_host_0,web1|test_host_0,web2)
        & (test_host_0,lvs1|test_host_0,lvs2)

        :return:
        '''
        pass

    def test_complex_ABCof_business_correlator(self):
        ''' BR - complex -bp_rule!5,1,1 of: test_host_0,A|test_host_0,B|test_host_0,C|
        test_host_0,D|test_host_0,E '''
        pass

    def test_complex_ABCof_pct_business_correlator(self):
        ''' BR - complex bp_rule!100%,20%,20% of: test_host_0,A|test_host_0,B|test_host_0,C|
        test_host_0,D|test_host_0,E '''
        pass

    def run_complex_ABCof_business_correlator(self, with_pct=False):
        '''

        :param with_pct: True if a percentage is set
        :return:
        '''
        pass

    def test_multi_layers(self):
        ''' BR - multi-levels rule

        bp_rule!(test_host_0,db1| (test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2) ) )
        & test_router_0

        :return:
        '''
        pass

    def test_darthelmet_rule(self):
        pass
| 26 | 23 | 97 | 13 | 61 | 25 | 2 | 0.4 | 1 | 3 | 1 | 0 | 25 | 1 | 25 | 80 | 2,442 | 348 | 1,531 | 170 | 1,505 | 620 | 1,388 | 170 | 1,362 | 17 | 2 | 2 | 53 |
4,039 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_broks.py
|
tests.test_broks.TestBroks
|
class TestBroks(AlignakTest):
    """Test the creation and the content of several Brok types.

    Covers the initial-status broks, the unknown-check-result broks built by
    the external command manager, the check-result broks and the event broks.
    """

    def setUp(self):
        """Load the default test configuration, with dispatching, and reset
        the broks already collected by the main broker."""
        super(TestBroks, self).setUp()
        self.setup_with_file('cfg/cfg_default.cfg', dispatching=True)
        self._main_broker.broks = []

    @staticmethod
    def _assert_brok_attributes(brok):
        """Check that a brok carries all the expected attributes and is not
        yet prepared (its `prepare` method was not called)."""
        assert hasattr(brok, 'uuid')
        assert hasattr(brok, 'creation_time')
        assert hasattr(brok, 'prepared')
        # Not yet prepared to get used, must call the prepare method!
        assert brok.prepared is False
        assert hasattr(brok, 'instance_id')
        assert hasattr(brok, 'type')
        assert hasattr(brok, 'data')

    def test_brok_initial_status(self):
        """Test initial status broks
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # To make tests quicker we make notifications send very quickly
        svc.notification_interval = 0.001
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        initial_broks_count = self._scheduler.fill_initial_broks('broker-master')
        print("fill_initial_broks got %s broks" % initial_broks_count)

        # Every brok pushed to every known broker must be well-formed
        for broker_uuid in self._scheduler.my_daemon.brokers:
            broker = self._scheduler.my_daemon.brokers[broker_uuid]
            print("Broker: %s" % broker)
            for brok in broker.broks:
                print("-: %s" % brok)
                self._assert_brok_attributes(brok)

    def test_unknown_check_result_brok(self):
        """ Unknown check results commands in broks
        """
        # unknown_host_check_result_brok
        excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is UP'
        expected = {
            'time_stamp': 1234567890, 'return_code': '2', 'host_name': 'test_host_0',
            'output': 'Host is UP', 'perf_data': None
        }
        brok = ExternalCommandManager.get_unknown_check_result_brok(excmd)
        print("Brok: %s" % brok)
        # the prepare method returns the brok data
        assert expected == brok.prepare()

        # unknown_host_check_result_brok with perfdata
        excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is UP|rtt=9999'
        expected = {
            'time_stamp': 1234567890, 'return_code': '2', 'host_name': 'test_host_0',
            'output': 'Host is UP', 'perf_data': 'rtt=9999'
        }
        brok = ExternalCommandManager.get_unknown_check_result_brok(excmd)
        assert expected == brok.prepare()

        # unknown_host_check_result_brok for another host
        # (note: this is a host check result, despite the original comment
        # labelling it as a service one)
        excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;host-checked;0;Everything OK'
        expected = {
            'time_stamp': 1234567890, 'return_code': '0', 'host_name': 'host-checked',
            'output': 'Everything OK', 'perf_data': None
        }
        brok = ExternalCommandManager.get_unknown_check_result_brok(excmd)
        assert expected == brok.prepare()

        # unknown_service_check_result_brok with perfdata
        excmd = '[1234567890] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;1;Service is WARNING|rtt=9999;5;10;0;10000'
        expected = {
            'host_name': 'test_host_0', 'time_stamp': 1234567890,
            'service_description': 'test_ok_0', 'return_code': '1',
            'output': 'Service is WARNING', 'perf_data': 'rtt=9999;5;10;0;10000'
        }
        brok = ExternalCommandManager.get_unknown_check_result_brok(excmd)
        assert expected == brok.prepare()

    def test_brok_checks_results(self):
        """Test broks checks results
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # To make tests quicker we make notifications send very quickly
        svc.notification_interval = 0.001
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        # Make the host down soft then hard.
        self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 0, 'OK']])
        time.sleep(0.1)
        self.scheduler_loop(2, [[host, 2, 'DOWN']])
        time.sleep(0.1)

        host_check_results = []
        service_check_results = []
        for brok in self._main_broker.broks:
            print("Brok %s: %s" % (brok.type, brok))
            self._assert_brok_attributes(brok)
            if brok.type == 'host_check_result':
                host_check_results.append(brok)
            elif brok.type == 'service_check_result':
                service_check_results.append(brok)

        assert len(host_check_results) == 3
        assert len(service_check_results) == 1

        # Prepare the broks to get used...
        print("HCR: %s" % host_check_results[0])
        host_check_results[0].prepare()
        print("HCR: %s" % host_check_results[0])
        hdata = host_check_results[0].data
        # Now it is a dict
        assert isinstance(hdata, dict)
        assert hdata['state'] == 'DOWN'
        assert hdata['state_type'] == 'SOFT'

        print("SCR: %s" % service_check_results[0])
        service_check_results[0].prepare()
        print("SCR: %s" % service_check_results[0])
        sdata = service_check_results[0].data
        # Bug fix: the original asserted `hdata` a second time here instead
        # of checking the freshly prepared service data `sdata`.
        assert isinstance(sdata, dict)
        assert sdata['state'] == 'OK'
        assert sdata['state_type'] == 'HARD'

    def test_brok_get_events(self):
        """Test broks for events
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # To make tests quicker we make notifications send very quickly
        svc.notification_interval = 0.001
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        # Make the host down soft then hard.
        self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 0, 'OK']])
        time.sleep(0.1)
        self.scheduler_loop(2, [[host, 2, 'DOWN']])
        time.sleep(0.1)

        print("My events: %s" % self._scheduler_daemon.events)
        # A deep copy creates new brok objects: they no longer compare equal...
        my_events = copy.deepcopy(self._scheduler_daemon.events)
        assert my_events != self._scheduler_daemon.events
        # ... whereas a shallow copy shares the brok objects
        my_events2 = copy.copy(self._scheduler_daemon.events)
        assert my_events2 == self._scheduler_daemon.events

        for brok in self._scheduler_daemon.events:
            self._assert_brok_attributes(brok)
            # Get an event from the brok
            ts, level, message = brok.get_event()
            # get_event implies that the brok data got prepared
            assert brok.prepared is True
            assert isinstance(brok.data, dict)
            print("Event: %s / %s / %s" % (ts, level, message))

        print("My events: %s" % my_events)
        res = serialize(my_events, True)
        print("My events: %s" % res)
|
class TestBroks(AlignakTest):
    # NOTE(review): auto-generated skeleton — every method body is a bare
    # ``pass``; this duplicates the real TestBroks class and only outlines
    # its interface. Confirm it is never imported next to the real class.
    '''
    This class test several Brok creation
    '''
    def setUp(self):
        pass

    def test_brok_initial_status(self):
        '''Test initial status broks
        '''
        pass

    def test_unknown_check_result_brok(self):
        ''' Unknown check results commands in broks
        '''
        pass

    def test_brok_checks_results(self):
        '''Test broks checks results
        '''
        pass

    def test_brok_get_events(self):
        '''Test broks for events
        '''
        pass
| 6 | 5 | 38 | 5 | 28 | 7 | 2 | 0.28 | 1 | 3 | 1 | 0 | 5 | 0 | 5 | 60 | 199 | 27 | 139 | 29 | 133 | 39 | 125 | 29 | 119 | 4 | 2 | 2 | 11 |
4,040 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_brok_ack_downtime.py
|
tests.test_brok_ack_downtime.TestBrokAckDowntime
|
class TestBrokAckDowntime(AlignakTest):
    """Test the acknowledge and downtime broks raised by the scheduler.

    Each test drives the scheduler with external commands and check results,
    then inspects the broks collected by the main broker.
    """

    def setUp(self):
        """Load the default test configuration, with dispatching, and reset
        the broks already collected by the main broker."""
        super(TestBrokAckDowntime, self).setUp()
        self.setup_with_file('cfg/cfg_default.cfg',
                             dispatching=True)
        self._main_broker.broks = []

    def _sort_broks(self, *brok_types):
        """Classify the main broker broks by type.

        This factors out the brok-filtering loop that every test repeats.

        :param brok_types: brok type names to collect
        :return: dict mapping each requested type to its list of broks
        """
        matches = {brok_type: [] for brok_type in brok_types}
        for brok in self._main_broker.broks:
            if brok.type in matches:
                matches[brok.type].append(brok)
        return matches

    @staticmethod
    def _assert_host_and_service_broks(broks):
        """Check that the two given broks both target test_host_0: one for
        the host itself and one for its test_ok_0 service."""
        host_brok = False
        service_brok = False
        for brok in broks:
            brok_data = brok.prepare()
            assert brok_data['host'] == 'test_host_0'
            if 'service' in brok_data:
                assert brok_data['service'] == 'test_ok_0'
                service_brok = True
            else:
                host_brok = True
        assert host_brok and service_brok

    def test_acknowledge_service(self):
        """Test broks when acknowledge

        :return: None
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
                                                                     "test_ok_0")
        # To make tests quicker we make notifications send very quickly
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        self._main_broker.broks = []

        self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])
        time.sleep(0.1)

        now = time.time()
        cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
            format(int(now), 'test_host_0', 'test_ok_0', 2, 0, 1, 'darth vader', 'normal process')
        self._scheduler.run_external_commands([cmd])
        self.scheduler_loop(3, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])

        broks = self._sort_broks('acknowledge_raise', 'acknowledge_expire')
        assert len(broks['acknowledge_raise']) == 1
        assert len(broks['acknowledge_expire']) == 0

        hdata = broks['acknowledge_raise'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert hdata['service'] == 'test_ok_0'
        assert hdata['comment'] == 'normal process'

        # return service in OK mode, so the acknowledge will be removed by the scheduler
        self._main_broker.broks = []
        self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 0, 'OK']])

        broks = self._sort_broks('acknowledge_raise', 'acknowledge_expire')
        assert len(broks['acknowledge_raise']) == 0
        assert len(broks['acknowledge_expire']) == 1

        hdata = broks['acknowledge_expire'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert hdata['service'] == 'test_ok_0'

        # Do the same but remove acknowledge with external commands:
        self._main_broker.broks = []
        self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])
        time.sleep(0.1)

        now = time.time()
        cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
            format(int(now), 'test_host_0', 'test_ok_0', 2, 0, 1, 'darth vader', 'normal process')
        self._scheduler.run_external_commands([cmd])
        self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])

        cmd = "[{0}] REMOVE_SVC_ACKNOWLEDGEMENT;{1};{2}\n". \
            format(int(now), 'test_host_0', 'test_ok_0')
        self._scheduler.run_external_commands([cmd])
        self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])

        broks = self._sort_broks('acknowledge_raise', 'acknowledge_expire')
        assert len(broks['acknowledge_raise']) == 1
        assert len(broks['acknowledge_expire']) == 1

        hdata = broks['acknowledge_expire'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert hdata['service'] == 'test_ok_0'
        assert hdata['comment'] == 'normal process'

    def test_acknowledge_host(self):
        """Test broks when acknowledge

        :return: None
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 2, 'CRITICAL']])
        time.sleep(0.1)

        now = time.time()
        cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM_EXPIRE;{1};{2};{3};{4};{5};{6};{7}\n". \
            format(int(now), 'test_host_0', 1, 0, 1, (now + 2), 'darth vader', 'normal process')
        self._scheduler.run_external_commands([cmd])
        self.external_command_loop(2)

        broks = self._sort_broks('acknowledge_raise', 'acknowledge_expire')
        # Got one brok for the host ack and one brok for the service ack
        assert len(broks['acknowledge_raise']) == 2
        self._assert_host_and_service_broks(broks['acknowledge_raise'])

        # return host in UP mode, so the acknowledge will be removed by the scheduler
        self._main_broker.broks = []
        self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 0, 'OK']])

        broks = self._sort_broks('acknowledge_raise', 'acknowledge_expire')
        assert len(broks['acknowledge_raise']) == 0
        assert len(broks['acknowledge_expire']) == 2
        self._assert_host_and_service_broks(broks['acknowledge_expire'])

        # Do the same but remove acknowledge with external commands:
        self._main_broker.broks = []
        self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 2, 'CRITICAL']])
        time.sleep(0.1)

        now = time.time()
        cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM_EXPIRE;{1};{2};{3};{4};{5};{6};{7}\n". \
            format(int(now), 'test_host_0', 1, 0, 1, (now + 2), 'darth vader', 'normal process')
        self._scheduler.run_external_commands([cmd])
        self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 2, 'CRITICAL']])

        cmd = "[{0}] REMOVE_HOST_ACKNOWLEDGEMENT;{1}\n". \
            format(int(now), 'test_host_0')
        self._scheduler.run_external_commands([cmd])
        self.scheduler_loop(3, [[host, 2, 'DOWN'], [svc, 2, 'CRITICAL']])

        broks = self._sort_broks('acknowledge_raise', 'acknowledge_expire')
        assert len(broks['acknowledge_raise']) == 2
        assert len(broks['acknowledge_expire']) == 1

        # Only the host acknowledge got removed
        brok_data = broks['acknowledge_expire'][0].prepare()
        assert brok_data['host'] == 'test_host_0'
        assert 'service' not in brok_data

    def test_fixed_downtime_service(self):
        """Test broks when downtime

        :return: None
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname(
            "test_host_0",
            "test_ok_0")
        # To make tests quicker we make notifications send very quickly
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])
        time.sleep(0.1)

        # schedule a 5 seconds downtime
        duration = 5
        now = int(time.time())
        # downtime valid for 5 seconds from now
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%d;%d;1;0;%d;" \
              "downtime author;downtime comment" % (now, now, now + duration, duration)
        self._scheduler.run_external_commands([cmd])
        self.external_command_loop()
        self.external_command_loop()

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 1
        assert len(broks['downtime_expire']) == 0

        hdata = broks['downtime_raise'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert hdata['service'] == 'test_ok_0'
        assert hdata['comment'] == 'downtime comment'

        # expire downtime
        self._main_broker.broks = []
        time.sleep(5)
        self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 0
        assert len(broks['downtime_expire']) == 1

        hdata = broks['downtime_expire'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert hdata['service'] == 'test_ok_0'
        assert hdata['comment'] == 'downtime comment'

    def test_fixed_downtime_host(self):
        """Test broks when downtime

        :return: None
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname(
            "test_host_0",
            "test_ok_0")
        # To make tests quicker we make notifications send very quickly
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])
        time.sleep(0.1)

        # schedule a 5 seconds downtime
        duration = 5
        now = int(time.time())
        # downtime valid for 5 seconds from now
        cmd = "[%lu] SCHEDULE_HOST_DOWNTIME;test_host_0;%d;%d;1;0;%d;" \
              "downtime author;downtime comment" % (now, now, now + duration, duration)
        self._scheduler.run_external_commands([cmd])
        self.external_command_loop()
        self.external_command_loop()

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 1
        assert len(broks['downtime_expire']) == 0

        hdata = broks['downtime_raise'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert 'service' not in hdata

        # expire downtime
        self._main_broker.broks = []
        time.sleep(5)
        self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 0
        assert len(broks['downtime_expire']) == 1

        hdata = broks['downtime_expire'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert 'service' not in hdata

    def test_flexible_downtime_service(self):
        """Test broks when downtime

        :return: None
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        svc = self._scheduler.services.find_srv_by_name_and_hostname(
            "test_host_0",
            "test_ok_0")
        # To make tests quicker we make notifications send very quickly
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        svc.event_handler_enabled = False

        self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
        time.sleep(0.1)

        # schedule a flexible downtime: 5 seconds duration within a one hour window
        duration = 5
        now = int(time.time())
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%d;%d;0;0;%d;" \
              "downtime author;downtime comment" % (now, now, now + 3600, duration)
        self._scheduler.run_external_commands([cmd])
        self.scheduler_loop(2, [[host, 0, 'UP'], [svc, 0, 'OK']])

        # While the service stays OK the flexible downtime is not started
        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 0
        assert len(broks['downtime_expire']) == 0

        time.sleep(1)
        self._main_broker.broks = []
        self.scheduler_loop(3, [[host, 0, 'UP'], [svc, 2, 'CRITICAL']])

        # The service failure triggers the flexible downtime
        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 1
        assert len(broks['downtime_expire']) == 0

        hdata = broks['downtime_raise'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert hdata['service'] == 'test_ok_0'

    def test_cancel_service(self):
        """Test broks when cancel downtime

        :return: None
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        self.scheduler_loop(1, [[host, 0, 'UP']])

        duration = 5
        now = int(time.time())
        # downtime valid for 5 seconds from now
        cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%d;%d;1;0;%d;" \
              "downtime author;downtime comment" % (now, now, now + duration, duration)
        self._scheduler.run_external_commands([cmd])
        self.external_command_loop()

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 1
        assert len(broks['downtime_expire']) == 0

        # External command: delete all service downtimes
        # (the original comment wrongly said "host downtime")
        now = int(time.time())
        self._main_broker.broks = []
        cmd = '[%d] DEL_ALL_SVC_DOWNTIMES;test_host_0;test_ok_0' % now
        self._scheduler.run_external_commands([cmd])
        self.external_command_loop()

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 0
        assert len(broks['downtime_expire']) == 1

        hdata = broks['downtime_expire'][0].prepare()
        assert hdata['host'] == 'test_host_0'
        assert hdata['service'] == 'test_ok_0'

    def test_cancel_host(self):
        """Test broks when cancel downtime

        :return: None
        """
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        host.event_handler_enabled = False

        self.scheduler_loop(1, [[host, 0, 'UP']])

        duration = 5
        now = int(time.time())
        # downtime valid for 5 seconds from now
        cmd = "[%lu] SCHEDULE_HOST_DOWNTIME;test_host_0;%d;%d;1;0;%d;" \
              "downtime author;downtime comment" % (now, now, now + duration, duration)
        self._scheduler.run_external_commands([cmd])
        self.external_command_loop()

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 1
        assert len(broks['downtime_expire']) == 0

        # External command: delete all host downtime
        now = int(time.time())
        self._main_broker.broks = []
        cmd = '[%d] DEL_ALL_HOST_DOWNTIMES;test_host_0' % now
        self._scheduler.run_external_commands([cmd])
        self.external_command_loop()

        broks = self._sort_broks('downtime_raise', 'downtime_expire')
        assert len(broks['downtime_raise']) == 0
        assert len(broks['downtime_expire']) == 1

        hdata = broks['downtime_expire'][0].prepare()
        assert hdata['host'] == 'test_host_0'
|
class TestBrokAckDowntime(AlignakTest):
    # NOTE(review): auto-generated skeleton — every method body is a bare
    # ``pass``; this duplicates the real TestBrokAckDowntime class and only
    # outlines its interface. Confirm it is never imported next to the real
    # class.
    '''
    This class test the acknowledge and downtime broks
    '''
    def setUp(self):
        pass

    def test_acknowledge_service(self):
        '''Test broks when acknowledge

        :return: None
        '''
        pass

    def test_acknowledge_host(self):
        '''Test broks when acknowledge

        :return: None
        '''
        pass

    def test_fixed_downtime_service(self):
        '''Test broks when downtime

        :return: None
        '''
        pass

    def test_fixed_downtime_host(self):
        '''Test broks when downtime

        :return: None
        '''
        pass

    def test_flexible_downtime_service(self):
        '''Test broks when downtime

        :return: None
        '''
        pass

    def test_cancel_service(self):
        '''Test broks when cancel downtime

        :return: None
        '''
        pass

    def test_cancel_host(self):
        '''Test broks when cancel downtime

        :return: None
        '''
        pass
| 9 | 8 | 65 | 11 | 49 | 7 | 8 | 0.15 | 1 | 2 | 0 | 0 | 8 | 0 | 8 | 63 | 532 | 96 | 390 | 72 | 381 | 58 | 352 | 72 | 343 | 14 | 2 | 2 | 60 |
4,041 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_acknowledge.py
|
tests.test_acknowledge.TestAcknowledges
|
class TestAcknowledges(AlignakTest):
"""
This class test acknowledge
"""
def setUp(self):
    """Load the default test configuration with dispatching enabled."""
    super(TestAcknowledges, self).setUp()
    self.setup_with_file('cfg/cfg_default.cfg',
                         dispatching=True)
def test_ack_host_sticky_ds_dh(self):
"""
Test host acknowledge with sticky when Down soft -> Down hard -> up
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not host.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM;{1};{2};{3};{4};{5};{6}\n".\
format(int(now), host.host_name, 2, 0, 1, 'dark vador', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
def test_ack_host_sticky_us_uh_dh(self):
"""
Test host acknowledge with sticky when Unreachable soft -> Unreachable hard -> Down hard
-> up
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.event_handler_enabled = False
host.notifications_enabled = False
host_router = self._scheduler.hosts.find_by_name("test_router_0")
host_router.checks_in_progress = []
host_router.event_handler_enabled = False
host_router.notifications_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [host_router, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert "UP" == host_router.state
assert "HARD" == host_router.state_type
assert "UP" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "SOFT" == host_router.state_type
# Unchanged
assert "UP" == host.state
assert "HARD" == host.state_type
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "SOFT" == host_router.state_type
# Unchanged
assert "UP" == host.state
assert "HARD" == host.state_type
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
# Goes unreachable hard
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
# Unchanged
assert "UNREACHABLE" == host.state
assert "SOFT" == host.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM;{1};{2};{3};{4};{5};{6}\n". \
format(int(now), host.host_name, 2, 0, 1, 'dark vador', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
assert "UNREACHABLE" == host.state
assert "SOFT" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host_router, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host_router.state
assert "HARD" == host_router.state_type
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host_router.state
assert "HARD" == host_router.state_type
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
def test_ack_host_nosticky_ds_dh(self):
"""
Test host acknowledge with no sticky when Down soft -> Down hard -> up
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not host.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM;{1};{2};{3};{4};{5};{6}\n". \
format(int(now), host.host_name, 1, 0, 1, 'dark vador', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
def test_ack_host_nosticky_us_uh_dh(self):
"""
Test host acknowledge with no sticky when Unreachable soft -> Unreachable hard -> Down hard
-> up
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.event_handler_enabled = False
host.notifications_enabled = False
host_router = self._scheduler.hosts.find_by_name("test_router_0")
host_router.checks_in_progress = []
host_router.event_handler_enabled = False
host_router.notifications_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [host_router, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert "UP" == host_router.state
assert "HARD" == host_router.state_type
assert "UP" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "SOFT" == host_router.state_type
# Unchanged
assert "UP" == host.state
assert "HARD" == host.state_type
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "SOFT" == host_router.state_type
# Unchanged
assert "UP" == host.state
assert "HARD" == host.state_type
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
# Goes unreachable hard
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
# Unchanged
assert "UNREACHABLE" == host.state
assert "SOFT" == host.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM;{1};{2};{3};{4};{5};{6}\n". \
format(int(now), host.host_name, 1, 0, 1, 'dark vador', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
assert "UNREACHABLE" == host.state
assert "SOFT" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host_router.state
assert "HARD" == host_router.state_type
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host_router, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host_router.state
assert "HARD" == host_router.state_type
assert "UNREACHABLE" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
self.scheduler_loop(1, [[host_router, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host_router.state
assert "HARD" == host_router.state_type
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
assert "UP" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
def test_ack_service_sticky_ws_wh_ch(self):
"""
Test service acknowledge with sticky when Warning soft -> Warning hard -> Critical hard
-> ok
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, svc.service_description, 2, 0, 1, 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
assert svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "HARD" == svc.state_type
assert svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
assert "OK" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
def test_ack_service_sticky_ws_ch(self):
"""
Test service acknowledge with sticky when Warning soft -> Critical hard -> ok
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, svc.service_description, 2, 0, 1, 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
assert svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
assert "OK" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
def test_ack_service_nosticky_ws_ch(self):
"""
Test service acknowledge with sticky when Warning soft -> Critical hard -> ok
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n".\
format(int(now), host.host_name, svc.service_description, 1, 0, 1, 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
assert svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
assert "OK" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
def test_ack_service_nosticky_ws_ch_early(self):
"""
Test service acknowledge with sticky when first (on 3 attempts) Warning soft ->
Critical hard -> ok
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, svc.service_description, 1, 0, 1, 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
assert svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "SOFT" == svc.state_type
assert not svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
assert "OK" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
def test_ack_service_sticky_ws_ok(self):
"""
Test service acknowledge with sticky when Warning soft -> ok
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, svc.service_description, 2, 0, 1, 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
assert "OK" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
def test_ack_service_nosticky_ws_ok(self):
"""
Test service acknowledge with sticky when Warning soft -> ok
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, svc.service_description, 1, 0, 1, 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
assert "OK" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
def test_ack_expire_service_nosticky_ch(self):
"""
Test service acknowledge expire 2 seconds with sticky when Critical hard
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "SOFT" == svc.state_type
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "SOFT" == svc.state_type
assert not svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM_EXPIRE;{1};{2};{3};{4};{5};{6};{7};{8}\n". \
format(int(now), host.host_name, svc.service_description, 1, 0, 1, (now + 2), 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert svc.problem_has_been_acknowledged
time.sleep(2.5)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
assert "OK" == svc.state
assert "HARD" == svc.state_type
assert not svc.problem_has_been_acknowledged
def test_ack_expire_host_nosticky_dh(self):
"""
Test host acknowledge expire 2 seconds with no sticky when Down hard
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not host.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
assert not host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM_EXPIRE;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, 1, 0, 1, (now + 2), 'dark vador', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
time.sleep(2.5)
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert not host.problem_has_been_acknowledged
def test_remove_ack_host_nosticky_dh(self):
"""
Test remove host acknowledge with no sticky when Down hard
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not host.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM;{1};{2};{3};{4};{5};{6}\n". \
format(int(now), host.host_name, 1, 0, 1, 'dark vador', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
assert host.problem_has_been_acknowledged
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "HARD" == host.state_type
assert host.problem_has_been_acknowledged
now = time.time()
cmd = "[{0}] REMOVE_HOST_ACKNOWLEDGEMENT;{1}\n". \
format(int(now), host.host_name)
self._scheduler.run_external_commands([cmd])
assert not host.problem_has_been_acknowledged
def test_remove_ack_service_nosticky_ch(self):
"""
Test service acknowledge expire 2 seconds with sticky when Critical hard
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.event_handler_enabled = False
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
assert not svc.problem_has_been_acknowledged
self.assert_actions_count(0)
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "SOFT" == svc.state_type
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "SOFT" == svc.state_type
assert not svc.problem_has_been_acknowledged
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, svc.service_description, 1, 0, 1, 'dark vador',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
# time.sleep(0.1)
assert "CRITICAL" == svc.state
assert "HARD" == svc.state_type
assert svc.problem_has_been_acknowledged
now = time.time()
cmd = "[{0}] REMOVE_SVC_ACKNOWLEDGEMENT;{1};{2}\n". \
format(int(now), host.host_name, svc.service_description)
self._scheduler.run_external_commands([cmd])
assert not svc.problem_has_been_acknowledged
|
class TestAcknowledges(AlignakTest):
    """Skeleton of the acknowledgement test class."""

    def setUp(self):
        pass

    def test_ack_host_sticky_ds_dh(self):
        """Host acknowledge, sticky: Down soft -> Down hard -> Up.

        :return: None
        """
        pass

    def test_ack_host_sticky_us_uh_dh(self):
        """Host acknowledge, sticky: Unreachable soft -> Unreachable hard -> Down hard -> Up.

        :return: None
        """
        pass

    def test_ack_host_nosticky_ds_dh(self):
        """Host acknowledge, not sticky: Down soft -> Down hard -> Up.

        :return: None
        """
        pass

    def test_ack_host_nosticky_us_uh_dh(self):
        """Host acknowledge, not sticky: Unreachable soft -> Unreachable hard -> Down hard -> Up.

        :return: None
        """
        pass

    def test_ack_service_sticky_ws_wh_ch(self):
        """Service acknowledge, sticky: Warning soft -> Warning hard -> Critical hard -> Ok.

        :return: None
        """
        pass

    def test_ack_service_sticky_ws_ch(self):
        """Service acknowledge, sticky: Warning soft -> Critical hard -> Ok.

        :return: None
        """
        pass

    def test_ack_service_nosticky_ws_ch(self):
        """Service acknowledge, not sticky: Warning soft -> Critical hard -> Ok.

        :return: None
        """
        pass

    def test_ack_service_nosticky_ws_ch_early(self):
        """Service acknowledge, not sticky, on the first of 3 attempts: Warning soft ->
        Critical hard -> Ok.

        :return: None
        """
        pass

    def test_ack_service_sticky_ws_ok(self):
        """Service acknowledge, sticky: Warning soft -> Ok.

        :return: None
        """
        pass

    def test_ack_service_nosticky_ws_ok(self):
        """Service acknowledge, not sticky: Warning soft -> Ok.

        :return: None
        """
        pass

    def test_ack_expire_service_nosticky_ch(self):
        """Service acknowledge with a 2 second expiration on a Critical hard state.

        :return: None
        """
        pass

    def test_ack_expire_host_nosticky_dh(self):
        """Host acknowledge with a 2 second expiration on a Down hard state.

        :return: None
        """
        pass

    def test_remove_ack_host_nosticky_dh(self):
        """Removal of a non-sticky host acknowledgement on a Down hard state.

        :return: None
        """
        pass

    def test_remove_ack_service_nosticky_ch(self):
        """Removal of a non-sticky service acknowledgement on a Critical hard state.

        :return: None
        """
        pass
| 16 | 15 | 55 | 8 | 37 | 12 | 1 | 0.33 | 1 | 2 | 0 | 0 | 15 | 0 | 15 | 70 | 850 | 140 | 553 | 74 | 537 | 183 | 516 | 74 | 500 | 1 | 2 | 0 | 15 |
4,042 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_utils_functions.py
|
tests.test_aa_utils_functions.TestUtils
|
class TestUtils(AlignakTest):
    """Tests for the utility functions of the framework."""

    def setUp(self):
        super(TestUtils, self).setUp()

    def test_average_percentile(self):
        """Check average_percentile on an empty list and on a known sample."""
        # An empty sample yields no statistics at all
        sample = []
        lat_avg, lat_min, lat_max = average_percentile(sample)
        assert lat_avg is None
        assert lat_min is None
        assert lat_max is None

        # Known sample: check average and the 5th / 95th percentile bounds
        sample = [10, 8, 9, 7, 3, 11, 7, 13, 9, 10]
        lat_avg, lat_min, lat_max = average_percentile(sample)
        print("result: %.2f, %.2f, %.2f" % (lat_min, lat_avg, lat_max))
        assert lat_avg == 8.7, 'Average'
        assert lat_min == 4.8, 'Minimum'
        assert lat_max == 12.1, 'Maximum'
|
class TestUtils(AlignakTest):
    """Skeleton of the utility functions test class."""

    def setUp(self):
        pass

    def test_average_percentile(self):
        """Check the average_percentile helper."""
        pass
| 3 | 0 | 8 | 1 | 7 | 1 | 1 | 0.07 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 57 | 18 | 2 | 15 | 5 | 12 | 1 | 15 | 5 | 12 | 1 | 2 | 0 | 2 |
4,043 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestTimeperiod
|
class TestTimeperiod(PropertiesTester, AlignakTest):
    """Check the default property values of a Timeperiod item."""

    # Properties that are declared but never used by this item type
    unused_props = []

    # Properties that have no default value and must be provided
    without_default = ['timeperiod_name']

    # Expected default value for every property of the item
    properties = {
        'alias': '',
        'imported_from': 'alignak-self',
        'use': [],
        'definition_order': 100,
        'name': '',
        'register': True,
        'dateranges': [],
        'exclude': [],
        'is_active': False,
        'activated_once': False,
        'unresolved': [],
        'invalid_entries': [],
    }

    def setUp(self):
        super(TestTimeperiod, self).setUp()
        from alignak.objects.timeperiod import Timeperiod
        self.item = Timeperiod({}, parsing=True)
|
class TestTimeperiod(PropertiesTester, AlignakTest):
    """Skeleton of the Timeperiod default properties test class."""

    def setUp(self):
        pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 25 | 4 | 21 | 7 | 18 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,044 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_business_correlator_expand_expression.py
|
tests.test_business_correlator_expand_expression.TestBusinessCorrelatorExpand
|
class TestBusinessCorrelatorExpand(AlignakTest):
def setUp(self):
super(TestBusinessCorrelatorExpand, self).setUp()
self.setup_with_file('cfg/cfg_business_correlator_expression.cfg',
dispatching=True)
assert self.conf_is_correct
self._sched = self._scheduler
def test_hostgroup_expansion_bprule_simple_host_srv(self):
""" BR expansion - simple host/service """
for name in ("bprule_00", "bprule_01", "bprule_02", "bprule_03",
"bprule_04", "bprule_05", "bprule_06"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert '&' == bp_rule.operand
assert False is bp_rule.not_value
assert ('2', '2', '2') == bp_rule.of_values
srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv1")
sons = bp_rule.sons
assert 2 == len(sons)
assert 'service' == sons[0].operand
assert 'service' == sons[1].operand
assert srv1.uuid in (sons[0].sons[0], sons[1].sons[0])
assert srv2.uuid in (sons[0].sons[0], sons[1].sons[0])
def test_hostgroup_expansion_bprule_simple_xof_host_srv(self):
""" BR expansion - simple X of:"""
for name in ("bprule_10", "bprule_11", "bprule_12", "bprule_13",
"bprule_14", "bprule_15", "bprule_16", "bprule_17"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert 'of:' == bp_rule.operand
assert False is bp_rule.not_value
assert ('1', '2', '2') == bp_rule.of_values
srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv1")
sons = bp_rule.sons
assert 2 == len(sons)
assert 'service' == sons[0].operand
assert 'service' == sons[1].operand
assert srv1.uuid in (sons[0].sons[0], sons[1].sons[0])
assert srv2.uuid in (sons[0].sons[0], sons[1].sons[0])
def test_hostgroup_expansion_bprule_combined_and(self):
""" BR expansion - combined AND """
for name in ("bprule_20", "bprule_21", "bprule_22", "bprule_23",
"bprule_24", "bprule_25", "bprule_26"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert '&' == bp_rule.operand
assert False is bp_rule.not_value
assert ('2', '2', '2') == bp_rule.of_values
sons = bp_rule.sons
assert 2 == len(sons)
for son in sons:
assert '&' == son.operand
assert False is son.not_value
assert ('2', '2', '2') == son.of_values
assert 2 == len(son.sons)
assert 'service' == son.sons[0].operand
assert 'service' == son.sons[1].operand
hst1_srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
hst2_srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv1")
hst1_srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv2")
hst2_srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
assert hst1_srv1.uuid in (sons[0].sons[0].sons[0], sons[0].sons[1].sons[0])
assert hst2_srv1.uuid in (sons[0].sons[0].sons[0], sons[0].sons[1].sons[0])
assert hst1_srv2.uuid in (sons[1].sons[0].sons[0], sons[1].sons[1].sons[0])
assert hst2_srv2.uuid in (sons[1].sons[0].sons[0], sons[1].sons[1].sons[0])
def test_hostgroup_expansion_bprule_combined_or(self):
""" BR expansion - combined OR """
for name in ("bprule_30", "bprule_31", "bprule_32", "bprule_33",
"bprule_34", "bprule_35", "bprule_36"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert '|' == bp_rule.operand
assert False is bp_rule.not_value
assert ('2', '2', '2') == bp_rule.of_values
sons = bp_rule.sons
assert 2 == len(sons)
for son in sons:
assert '&' == son.operand
assert False is son.not_value
assert ('2', '2', '2') == son.of_values
assert 2 == len(son.sons)
assert 'service' == son.sons[0].operand
assert 'service' == son.sons[1].operand
hst1_srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
hst2_srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv1")
hst1_srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv2")
hst2_srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
assert hst1_srv1.uuid in (sons[0].sons[0].sons[0], sons[0].sons[1].sons[0])
assert hst2_srv1.uuid in (sons[0].sons[0].sons[0], sons[0].sons[1].sons[0])
assert hst1_srv2.uuid in (sons[1].sons[0].sons[0], sons[1].sons[1].sons[0])
assert hst2_srv2.uuid in (sons[1].sons[0].sons[0], sons[1].sons[1].sons[0])
def test_hostgroup_expansion_bprule_simple_hosts(self):
""" BR expansion - simple hosts """
for name in ("bprule_40", "bprule_41", "bprule_42", "bprule_43"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert '&' == bp_rule.operand
assert False is bp_rule.not_value
assert ('2', '2', '2') == bp_rule.of_values
hst1 = self._sched.hosts.find_by_name("test_host_01")
hst2 = self._sched.hosts.find_by_name("test_host_02")
sons = bp_rule.sons
assert 2 == len(sons)
assert 'host' == sons[0].operand
assert 'host' == sons[1].operand
assert hst1.uuid in (sons[0].sons[0], sons[1].sons[0])
assert hst2.uuid in (sons[0].sons[0], sons[1].sons[0])
def test_hostgroup_expansion_bprule_xof_hosts(self):
""" BR expansion - X of: hosts """
for name in ("bprule_50", "bprule_51", "bprule_52", "bprule_53", "bprule_54"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert 'of:' == bp_rule.operand
assert False is bp_rule.not_value
assert ('1', '2', '2') == bp_rule.of_values
hst1 = self._sched.hosts.find_by_name("test_host_01")
hst2 = self._sched.hosts.find_by_name("test_host_02")
sons = bp_rule.sons
assert 2 == len(sons)
assert 'host' == sons[0].operand
assert 'host' == sons[1].operand
assert hst1.uuid in (sons[0].sons[0], sons[1].sons[0])
assert hst2.uuid in (sons[0].sons[0], sons[1].sons[0])
def test_hostgroup_expansion_bprule_same_host_srv(self):
""" BR expansion - sale host/service """
for name in ("bprule_60", "bprule_61"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_01", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert '&' == bp_rule.operand
assert False is bp_rule.not_value
assert ('2', '2', '2') == bp_rule.of_values
srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv2")
sons = bp_rule.sons
assert 2 == len(sons)
assert 'service' == sons[0].operand
assert 'service' == sons[1].operand
assert srv1.uuid in (sons[0].sons[0], sons[1].sons[0])
assert srv2.uuid in (sons[0].sons[0], sons[1].sons[0])
def test_hostgroup_expansion_bprule_xof_same_host_srv(self):
""" BR expansion - X of: same host/service """
for name in ("bprule_70", "bprule_71"):
svc_cor = self._sched.services.find_srv_by_name_and_hostname("test_host_01", name)
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
bp_rule = svc_cor.business_rule
assert 'of:' == bp_rule.operand
assert False is bp_rule.not_value
assert ('1', '2', '2') == bp_rule.of_values
srv1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
srv2 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv2")
sons = bp_rule.sons
assert 2 == len(sons)
assert 'service' == sons[0].operand
assert 'service' == sons[1].operand
assert srv1.uuid in (sons[0].sons[0], sons[1].sons[0])
assert srv2.uuid in (sons[0].sons[0], sons[1].sons[0])
def test_macro_expansion_bprule_no_macro(self):
""" BR expansion - no macro """
# Tests macro expansion
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", "bprule_no_macro")
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert "1 of: test_host_01,srv1 & test_host_02,srv2" == svc_cor.processed_business_rule
bp_rule = svc_cor.business_rule
assert 'of:' == bp_rule.operand
assert ('1', '2', '2') == bp_rule.of_values
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # no host checks on critical check results
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # no host checks on critical check results
# Setting dependent services status
self.scheduler_loop(1, [
[svc1, 0, 'UP | value1=1 value2=2'],
[svc2, 0, 'UP | value1=1 value2=2']])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'OK' == svc2.state
assert 'HARD' == svc2.state_type
self.scheduler_loop(2, [
[svc1, 2, 'CRITICAL | value1=1 value2=2']
])
assert 'CRITICAL' == svc1.state
assert 'HARD' == svc1.state_type
# Forces business rule evaluation.
self.scheduler_loop(2, [
[svc_cor, None, None]
])
# Business rule should not have been re-evaluated (no macro in the
# bp_rule)
assert bp_rule is svc_cor.business_rule
bp_rule = svc_cor.business_rule
assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == svc_cor.last_hard_state_id
def test_macro_expansion_bprule_macro_expand(self):
""" BR expansion - macro expansion """
# Tests macro expansion
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy", "bprule_macro_expand")
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
assert "1 of: test_host_01,srv1 & test_host_02,srv2" == svc_cor.processed_business_rule
bp_rule = svc_cor.business_rule
assert 'of:' == bp_rule.operand
assert ('1', '2', '2') == bp_rule.of_values
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # no host checks on critical check results
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # no host checks on critical check results
# Setting dependent services status
self.scheduler_loop(1, [
[svc1, 0, 'UP | value1=1 value2=2'],
[svc2, 0, 'UP | value1=1 value2=2']])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'OK' == svc2.state
assert 'HARD' == svc2.state_type
self.scheduler_loop(2, [
[svc1, 2, 'CRITICAL | value1=1 value2=2']
])
assert 'CRITICAL' == svc1.state
assert 'HARD' == svc1.state_type
# Forces business rule evaluation.
self.scheduler_loop(2, [
[svc_cor, None, None]
])
# Business rule should not have been re-evaluated (macro did not change
# value)
assert bp_rule is svc_cor.business_rule
bp_rule = svc_cor.business_rule
assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == svc_cor.last_hard_state_id
def test_macro_expansion_bprule_macro_modulated(self):
""" BR expansion - macro modulated """
# Tests macro modulation
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy_modulated",
"bprule_macro_modulated")
svc_cor.act_depend_of = [] # no host checks on critical check results
assert True is svc_cor.got_business_rule
assert svc_cor.business_rule is not None
# !!!!!!!!!!!!!!!!! assert "2 of: test_host_01,srv1 & test_host_02,srv2" ==
# svc_cor.processed_business_rule
assert 'of:' == svc_cor.business_rule.operand
assert ('2', '2', '2') == svc_cor.business_rule.of_values
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc1.act_depend_of = [] # no host checks on critical check results
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
svc2.act_depend_of = [] # no host checks on critical check results
# Setting dependent services status
# - both are OK
self.scheduler_loop(2, [
[svc1, 0, 'OK | value1=1 value2=2'],
[svc2, 0, 'OK | value1=1 value2=2']
])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'OK' == svc2.state
assert 'HARD' == svc2.state_type
# - but now one is critical
self.scheduler_loop(2, [
[svc1, 2, 'CRITICAL | value1=1 value2=2']
])
assert 'CRITICAL' == svc1.state
assert 'HARD' == svc1.state_type
# Launch an internal check
print("BR: %s" % svc_cor.business_rule)
self.launch_internal_check(svc_cor)
print("BR: %s" % svc_cor.business_rule)
# Business rule should not have been re-evaluated (macro did not change value)
# assert bp_rule is svc_cor.business_rule
# bp_rule = svc_cor.business_rule
assert 2 == svc_cor.business_rule.get_state(self._sched.hosts, self._sched.services)
assert 2 == svc_cor.last_hard_state_id
# Get macro modulation value and change its value
mod = self._sched.macromodulations.find_by_name("xof_modulation")
mod.customs['_XOF'] = '1'
# Launch an internal check
self.launch_internal_check(svc_cor)
assert "1 of: test_host_01,srv1 & test_host_02,srv2" == svc_cor.processed_business_rule
# assert svc_cor.business_rule is not bp_rule
bp_rule = svc_cor.business_rule
assert 'of:' == bp_rule.operand
assert ('1', '2', '2') == bp_rule.of_values
assert 0 == bp_rule.get_state(self._sched.hosts, self._sched.services)
assert 0 == svc_cor.last_hard_state_id
# Tests wrongly written macro modulation (inserts invalid string)
mod.customs['_XOF'] = 'fake'
# Launch an internal check
self.launch_internal_check(svc_cor)
# Business rule should have been re-evaluated (macro was modulated)
assert bp_rule is svc_cor.business_rule
assert 3 == svc_cor.last_hard_state_id
assert svc_cor.output.startswith("Error while re-evaluating business rule")
def test_macro_expansion_bprule_macro_profile(self):
""" BR expansion - macro profile """
if PROFILE_BP_RULE_RE_PROCESSING is False:
return
import cProfile as profile
svc1 = self._sched.services.find_srv_by_name_and_hostname("test_host_01", "srv1")
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_host_02", "srv2")
# Setting dependent services status
self.scheduler_loop(1, [
[svc1, 0, 'UP | value1=1 value2=2'],
[svc2, 0, 'UP | value1=1 value2=2']
])
assert 'OK' == svc1.state
assert 'HARD' == svc1.state_type
assert 'OK' == svc2.state
assert 'HARD' == svc2.state_type
self.scheduler_loop(1, [[svc1, 2, 'CRITICAL | value1=1 value2=2']], verbose=False)
assert 'CRITICAL' == svc1.state
assert 'HARD' == svc1.state_type
print("Profiling without macro")
def profile_bp_rule_without_macro():
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy",
"bprule_no_macro")
for i in range(1000):
self.scheduler_loop(2, [
[svc_cor, None, None]
])
profile.runctx('profile_bp_rule_without_macro()', globals(), locals())
print("Profiling with macro")
def profile_bp_rule_macro_expand():
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy",
"bprule_macro_expand")
for i in range(1000):
self.scheduler_loop(2, [
[svc_cor, None, None]
])
profile.runctx('profile_bp_rule_macro_expand()', globals(), locals())
print("Profiling with macro modulation")
def profile_bp_rule_macro_modulated():
svc_cor = self._sched.services.find_srv_by_name_and_hostname("dummy_modulated",
"bprule_macro_modulated")
for i in range(1000):
self.scheduler_loop(2, [
[svc_cor, None, None]
])
profile.runctx('profile_bp_rule_macro_modulated()', globals(), locals())
|
class TestBusinessCorrelatorExpand(AlignakTest):
def setUp(self):
pass
def test_hostgroup_expansion_bprule_simple_host_srv(self):
''' BR expansion - simple host/service '''
pass
def test_hostgroup_expansion_bprule_simple_xof_host_srv(self):
''' BR expansion - simple X of:'''
pass
def test_hostgroup_expansion_bprule_combined_and(self):
''' BR expansion - combined AND '''
pass
def test_hostgroup_expansion_bprule_combined_or(self):
''' BR expansion - combined OR '''
pass
def test_hostgroup_expansion_bprule_simple_hosts(self):
''' BR expansion - simple hosts '''
pass
def test_hostgroup_expansion_bprule_xof_hosts(self):
''' BR expansion - X of: hosts '''
pass
def test_hostgroup_expansion_bprule_same_host_srv(self):
''' BR expansion - sale host/service '''
pass
def test_hostgroup_expansion_bprule_xof_same_host_srv(self):
''' BR expansion - X of: same host/service '''
pass
def test_macro_expansion_bprule_no_macro(self):
''' BR expansion - no macro '''
pass
def test_macro_expansion_bprule_macro_expand(self):
''' BR expansion - macro expansion '''
pass
def test_macro_expansion_bprule_macro_modulated(self):
''' BR expansion - macro modulated '''
pass
def test_macro_expansion_bprule_macro_profile(self):
''' BR expansion - macro profile '''
pass
def profile_bp_rule_without_macro():
pass
def profile_bp_rule_macro_expand():
pass
def profile_bp_rule_macro_modulated():
pass
| 17 | 12 | 28 | 4 | 21 | 3 | 2 | 0.15 | 1 | 2 | 0 | 0 | 13 | 1 | 13 | 68 | 433 | 79 | 315 | 94 | 297 | 46 | 280 | 94 | 262 | 3 | 2 | 2 | 30 |
4,045 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_satellite_link.py
|
tests.test_satellite_link.Test_SchedulerLink_get_name
|
class Test_SchedulerLink_get_name(template_DaemonLink_get_name, AlignakTest):
"""Test satellite link scheduler"""
def setUp(self):
super(Test_SchedulerLink_get_name, self).setUp()
daemon_link = SchedulerLink
|
class Test_SchedulerLink_get_name(template_DaemonLink_get_name, AlignakTest):
'''Test satellite link scheduler'''
def setUp(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 58 | 6 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
4,046 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_satellite_link.py
|
tests.test_satellite_link.Test_ReceiverLink_get_name
|
class Test_ReceiverLink_get_name(template_DaemonLink_get_name, AlignakTest):
"""Test satellite link receiver"""
def setUp(self):
super(Test_ReceiverLink_get_name, self).setUp()
daemon_link = ReceiverLink
|
class Test_ReceiverLink_get_name(template_DaemonLink_get_name, AlignakTest):
'''Test satellite link receiver'''
def setUp(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 58 | 6 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
4,047 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_satellite_link.py
|
tests.test_satellite_link.Test_ReactionnerLink_get_name
|
class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, AlignakTest):
"""Test satellite link reactionner"""
def setUp(self):
super(Test_ReactionnerLink_get_name, self).setUp()
daemon_link = ReactionnerLink
|
class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, AlignakTest):
'''Test satellite link reactionner'''
def setUp(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 58 | 6 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
4,048 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_complex_hostgroups.py
|
tests.test_complex_hostgroups.TestComplexHostgroups
|
class TestComplexHostgroups(AlignakTest):
def setUp(self):
super(TestComplexHostgroups, self).setUp()
self.setup_with_file('cfg/hostgroup/complex_hostgroups.cfg')
assert self.conf_is_correct
def get_svc(self):
return self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
def find_service(self, name, desc):
return self._arbiter.conf.services.find_srv_by_name_and_hostname(name, desc)
def find_host(self, name):
return self._arbiter.conf.hosts.find_by_name(name)
def find_hostgroup(self, name):
return self._arbiter.conf.hostgroups.find_by_name(name)
def dump_hosts(self, svc):
for h in svc.host_name:
print(h)
# check if service exist in hst, but NOT in others
def service_defined_only_on(self, service_description, hosts):
"""
Check if the service named as service_description exists on the hosts and
not on the other scheduler hosts
:param service_description: service to search for
:param hosts: list of expected hosts
:return:
"""
result = True
# Exists on the listed hosts
for host in hosts:
svc = self.find_service(host.host_name, service_description)
if svc is None:
print("Error: the host %s is missing service %s!!" % (host.host_name,
service_description))
result = False
# Do not exist on the other hosts
for host in self._arbiter.conf.hosts:
if host not in hosts:
svc = self.find_service(host.host_name, service_description)
if svc is not None:
print("Error: the host %s got the service %s!!" % (host.host_name,
service_description))
result = False
return result
def test_complex_hostgroups(self):
"""
Test a complex hostgroup definition
:return:
"""
# Get all our hosts
test_linux_web_prod_0 = self.find_host('test_linux_web_prod_0')
assert test_linux_web_prod_0 is not None
test_linux_web_qual_0 = self.find_host('test_linux_web_qual_0')
assert test_linux_web_qual_0 is not None
test_win_web_prod_0 = self.find_host('test_win_web_prod_0')
assert test_win_web_prod_0 is not None
test_win_web_qual_0 = self.find_host('test_win_web_qual_0')
assert test_win_web_qual_0 is not None
test_linux_file_prod_0 = self.find_host('test_linux_file_prod_0')
assert test_linux_file_prod_0 is not None
hg_linux = self.find_hostgroup('linux')
assert hg_linux is not None
hg_web = self.find_hostgroup('web')
assert hg_web is not None
hg_win = self.find_hostgroup('win')
assert hg_win is not None
hg_file = self.find_hostgroup('file')
assert hg_file is not None
# Hostgroup linux has 3 hosts
assert hg_linux.get_name() == "linux"
assert len(hg_linux.get_hosts()) == 3
# Expected hosts are in this group
assert test_linux_web_prod_0.uuid in hg_linux.members
assert test_linux_web_qual_0.uuid in hg_linux.members
assert test_linux_file_prod_0.uuid in hg_linux.members
for host in hg_linux:
assert self._arbiter.conf.hosts[host].get_name() in ['test_linux_web_prod_0',
'test_linux_web_qual_0',
'test_linux_file_prod_0']
# First the service defined for the hostgroup: linux
assert self.service_defined_only_on('linux_0', [test_linux_web_prod_0,
test_linux_web_qual_0,
test_linux_file_prod_0])
# Then a service defined for the hostgroups: linux,web
assert self.service_defined_only_on('linux_web_0', [test_linux_web_prod_0,
test_linux_web_qual_0,
test_linux_file_prod_0,
test_win_web_prod_0,
test_win_web_qual_0])
# The service defined for the hostgroup: linux&web
assert self.service_defined_only_on('linux_AND_web_0', [test_linux_web_prod_0,
test_linux_web_qual_0])
# The service defined for the hostgroup: linux|web
assert self.service_defined_only_on('linux_OR_web_0', [test_linux_web_prod_0,
test_linux_web_qual_0,
test_win_web_prod_0,
test_win_web_qual_0,
test_linux_file_prod_0])
# The service defined for the hostgroup: (linux|web),file
assert self.service_defined_only_on('linux_OR_web_PAR_file0', [test_linux_web_prod_0,
test_linux_web_qual_0,
test_win_web_prod_0,
test_win_web_qual_0,
test_linux_file_prod_0,
test_linux_file_prod_0])
# The service defined for the hostgroup: (linux|web)&prod
assert self.service_defined_only_on('linux_OR_web_PAR_AND_prod0', [test_linux_web_prod_0,
test_win_web_prod_0,
test_linux_file_prod_0])
# The service defined for the hostgroup: (linux|web)&(*&!prod)
assert self.service_defined_only_on(
'linux_OR_web_PAR_AND_NOT_prod0', [test_linux_web_qual_0, test_win_web_qual_0])
# The service defined for the hostgroup with a minus sign in its name
assert self.service_defined_only_on('name-with-minus-in-it', [test_linux_web_prod_0])
# The service defined for the hostgroup: (linux|web)&(prod), except an host
assert self.service_defined_only_on(
'linux_OR_web_PAR_AND_prod0_AND_NOT_test_linux_file_prod_0', [test_linux_web_prod_0,
test_win_web_prod_0])
# The service defined for the hostgroup: win&((linux|web)&prod), except an host
assert self.service_defined_only_on(
'WINDOWS_AND_linux_OR_web_PAR_AND_prod0_AND_NOT_test_linux_file_prod_0', [
test_win_web_prod_0])
|
class TestComplexHostgroups(AlignakTest):
def setUp(self):
pass
def get_svc(self):
pass
def find_service(self, name, desc):
pass
def find_host(self, name):
pass
def find_hostgroup(self, name):
pass
def dump_hosts(self, svc):
pass
def service_defined_only_on(self, service_description, hosts):
'''
Check if the service named as service_description exists on the hosts and
not on the other scheduler hosts
:param service_description: service to search for
:param hosts: list of expected hosts
:return:
'''
pass
def test_complex_hostgroups(self):
'''
Test a complex hostgroup definition
:return:
'''
pass
| 9 | 2 | 17 | 2 | 12 | 3 | 2 | 0.29 | 1 | 1 | 0 | 0 | 8 | 0 | 8 | 63 | 142 | 22 | 93 | 23 | 84 | 27 | 66 | 23 | 57 | 6 | 2 | 3 | 15 |
4,049 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_comments.py
|
tests.test_comments.TestComments
|
class TestComments(AlignakTest):
"""
This class test the comments (acknowledge, downtimes...).
"""
def setUp(self):
super(TestComments, self).setUp()
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
def test_host_acknowledge(self):
"""Test add / delete comment for acknowledge on host
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
self.scheduler_loop(1, [[host, 2, 'DOWN']])
time.sleep(0.1)
assert host.state == "DOWN"
assert host.state_type == "SOFT"
self.scheduler_loop(1, [[host, 2, 'DOWN']])
time.sleep(0.1)
assert host.state == "DOWN"
assert host.state_type == "SOFT"
self.scheduler_loop(1, [[host, 2, 'DOWN']])
time.sleep(0.1)
assert host.state == "DOWN"
assert host.state_type == "HARD"
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM;{1};{2};{3};{4};{5};{6}\n".\
format(int(now), host.host_name, 2, 0, 1, 'darth vader', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert host.problem_has_been_acknowledged
# we must have a comment
assert len(host.comments) == 1
# Test with a new acknowledge, will replace previous
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM;{1};{2};{3};{4};{5};{6}\n".\
format(int(now), host.host_name, 2, 0, 1, 'darth vader', 'normal new process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
# we must have a comment
assert len(host.comments) == 1
for comment_id in host.comments:
assert host.comments[comment_id].comment == 'normal new process'
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
# we must have no comment (the comment must be deleted like the acknowledge)
assert not host.problem_has_been_acknowledged
assert len(host.comments) == 0
def test_host_acknowledge_expire(self):
"""Test add / delete comment for acknowledge on host with expire
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert "DOWN" == host.state
assert "SOFT" == host.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_HOST_PROBLEM_EXPIRE;{1};{2};{3};{4};{5};{6};{7}\n".\
format(int(now), host.host_name, 2, 0, 1, int(now) + 3, 'darth vader', 'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
assert host.problem_has_been_acknowledged
# we must have a comment
assert len(host.comments) == 1
time.sleep(3)
self.scheduler_loop(1, [[host, 2, 'DOWN']])
# time.sleep(0.1)
# we must have no comment (the comment must be deleted like the acknowledge)
assert not host.problem_has_been_acknowledged
assert len(host.comments) == 0
def test_service_acknowledge(self):
"""Test add / delete comment for acknowledge on service
:return: None
"""
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.max_check_attempts = 3
self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']])
# time.sleep(0.1)
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert "WARNING" == svc.state
assert "SOFT" == svc.state_type
now = time.time()
cmd = "[{0}] ACKNOWLEDGE_SVC_PROBLEM;{1};{2};{3};{4};{5};{6};{7}\n". \
format(int(now), host.host_name, svc.service_description, 2, 0, 1, 'darth vader',
'normal process')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[svc, 1, 'WARNING']])
# time.sleep(0.1)
assert svc.problem_has_been_acknowledged
# we must have a comment
assert len(svc.comments) == 1
self.scheduler_loop(1, [[svc, 0, 'OK']])
# time.sleep(0.1)
# we must have no comment (the comment must be deleted like the acknowledge)
assert not svc.problem_has_been_acknowledged
assert len(svc.comments) == 0
def test_host_downtime(self):
pass
def test_host_comment(self):
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
now = time.time()
cmd = "[{0}] ADD_HOST_COMMENT;{1};{2};{3};{4}\n". \
format(int(now), host.host_name, 1, 'darth vader', 'nice comment')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
# we must have a comment
assert len(host.comments) == 1
# comment number 2
now = time.time()
cmd = "[{0}] ADD_HOST_COMMENT;{1};{2};{3};{4}\n". \
format(int(now), host.host_name, 1, 'emperor', 'nice comment yes')
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
assert len(host.comments) == 2
# del all comments of the host
now = time.time()
cmd = "[{0}] DEL_ALL_HOST_COMMENTS;{1}\n". \
format(int(now), host.host_name)
self._scheduler.run_external_commands([cmd])
self.scheduler_loop(1, [[host, 0, 'UP']])
# time.sleep(0.1)
assert len(host.comments) == 0
|
class TestComments(AlignakTest):
'''
This class test the comments (acknowledge, downtimes...).
'''
def setUp(self):
pass
def test_host_acknowledge(self):
'''Test add / delete comment for acknowledge on host
:return: None
'''
pass
def test_host_acknowledge_expire(self):
'''Test add / delete comment for acknowledge on host with expire
:return: None
'''
pass
def test_service_acknowledge(self):
'''Test add / delete comment for acknowledge on service
:return: None
'''
pass
def test_host_downtime(self):
pass
def test_host_comment(self):
pass
| 7 | 4 | 30 | 6 | 18 | 7 | 1 | 0.38 | 1 | 2 | 0 | 0 | 6 | 0 | 6 | 61 | 188 | 41 | 110 | 21 | 103 | 42 | 100 | 21 | 93 | 2 | 2 | 1 | 7 |
4,050 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_commands.py
|
tests.test_commands.TestCommand
|
class TestCommand(AlignakTest):
"""
This class tests the commands
"""
def setUp(self):
super(TestCommand, self).setUp()
self.setup_with_file('cfg/cfg_commands.cfg', verbose=False, dispatching=True)
assert self.conf_is_correct
def test_css_in_commands(self):
""" Test CSS and HTML in command """
pass
# The test is implicit because the configuration got loaded!
def test_semi_colon_in_commands(self):
"""Test semi-colon in commands """
# Get the hosts and services"
host = self._arbiter.conf.hosts.find_by_name("test_host_0")
assert host is not None
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_0", "svc_semi_colon")
assert svc is not None
# Event handler command is:
# $USER1$/test_eventhandler.pl $SERVICESTATE$ $SERVICESTATETYPE$ $SERVICEATTEMPT$
#
svc.get_event_handlers(self._scheduler.hosts, self._scheduler.macromodulations,
self._scheduler.timeperiods)
assert len(svc.actions) == 1
for action in svc.actions:
assert action.is_a == 'eventhandler'
assert action.command == '/usr/lib/nagios/plugins/test_eventhandler.pl ' \
'sudo -s pkill toto ; cd /my/path && ./exec'
def test_spaces_in_commands(self):
"""Test spaces in commands
Service is defined as:
service_description svc_spaces
check_command check_snmp_int!public!"Nortel Ethernet Routing Switch 5530-24TFD
Module - Port 2 "!"90000,90000"!"120000,120000"
And command as:
command_name check_snmp_int
command_line $USER1$/check_snmp_int.pl -H $HOSTADDRESS$ -C $ARG1$ -n $ARG2$
-r -f -k -Y -B -w $ARG3$ -c $ARG4$
"""
# Get the hosts and services"
host = self._scheduler.hosts.find_by_name("test_host_0")
assert host is not None
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "svc_spaces")
assert svc is not None
for command in self._scheduler.commands:
print("-act: %s" % command)
# Schedule checks
svc.schedule(self._scheduler.hosts, self._scheduler.services, self._scheduler.timeperiods,
self._scheduler.macromodulations, self._scheduler.checkmodulations,
self._scheduler.checks)
assert len(svc.actions) == 1
for action in svc.actions:
print("Action: %s" % action)
# command:'/usr/lib/nagios/plugins/check_snmp_int.pl -H 127.0.0.1 -C public
# -n "Nortel Ethernet Routing Switch 5530-24TFD Module -
# Port 2 " -r -f -k -Y -B -w "90000 -c 90000"'
assert action.is_a == 'check'
assert action.command == '/usr/lib/nagios/plugins/check_snmp_int.pl ' \
'-H 127.0.0.1 ' \
'-C public ' \
'-n "Nortel Ethernet Routing Switch 5530-24TFD ' \
'Module - Port 2 " ' \
'-r -f -k -Y -B -w "90000,90000" -c "120000,120000"'
# Run checks now
action.t_to_go = 0
# the scheduler need to get this new checks in its own queues
self._scheduler.get_new_actions()
untagged_checks = self._scheduler.get_to_run_checks(True, False, poller_tags=['None'])
assert len(untagged_checks) == 1
for check in untagged_checks:
assert check.is_a == 'check'
assert check.command == '/usr/lib/nagios/plugins/check_snmp_int.pl ' \
'-H 127.0.0.1 ' \
'-C public ' \
'-n "Nortel Ethernet Routing Switch 5530-24TFD ' \
'Module - Port 2 " ' \
'-r -f -k -Y -B -w "90000,90000" -c "120000,120000"'
def test_command_no_parameters(self):
""" Test command without parameters
:return: None
"""
# No parameters
c = Command({})
# No command_name nor command_line attribute exist!
# Todo: __init__ may raise an exception because of this, no?
assert getattr(c, 'command_name', None) is None
assert getattr(c, 'command_line', None) is None
assert c.poller_tag == 'None'
assert c.reactionner_tag == 'None'
assert c.timeout == -1
assert c.module_type == 'fork'
assert c.enable_environment_macros == False
b = c.get_initial_status_brok()
assert 'initial_command_status' == b.type
assert 'command_name' not in b.data
assert 'command_line' not in b.data
def test_command_with_tags(self):
""" Test command with poller/reactionner tag
:return: None
"""
# Get a command
c = self._arbiter.conf.commands.find_by_name("command_poller_tag")
assert c is not None
assert c.poller_tag == 'tag1'
assert c.reactionner_tag == 'None'
# Get a command
c = self._arbiter.conf.commands.find_by_name("command_reactionner_tag")
assert c is not None
assert c.poller_tag == 'None'
assert c.reactionner_tag == 'tag2'
def test_command_internal_host_up(self):
""" Test internal command _internal_host_up
:return: None
"""
c = Command({
'command_name': '_internal_host_up',
'command_line': '_internal_host_up'
})
assert c.command_name == '_internal_host_up'
assert c.get_name() == '_internal_host_up'
assert c.command_line == '_internal_host_up'
assert c.poller_tag == 'None'
assert c.reactionner_tag == 'None'
assert c.timeout == -1
# Module type is the command name without the '_' prefix
assert c.module_type == 'internal'
assert c.enable_environment_macros == False
b = c.get_initial_status_brok()
assert 'initial_command_status' == b.type
assert 'command_name' in b.data
assert 'command_line' in b.data
def test_command_internal_echo(self):
""" Test internal command _echo
:return: None
"""
c = Command({
'command_name': '_echo',
'command_line': '_echo'
})
assert c.command_name == '_echo'
assert c.get_name() == '_echo'
assert c.command_line == '_echo'
assert c.poller_tag == 'None'
assert c.reactionner_tag == 'None'
assert c.timeout == -1
# Module type is the command name without the '_' prefix
assert c.module_type == 'internal'
assert c.enable_environment_macros == False
b = c.get_initial_status_brok()
assert 'initial_command_status' == b.type
assert 'command_name' in b.data
assert 'command_line' in b.data
def test_command_build(self):
""" Test command build
:return: None
"""
c = Command({
'command_name': 'check_command_test',
'command_line': '/tmp/dummy_command.sh $ARG1$ $ARG2$',
'module_type': 'nrpe-booster',
'poller_tag': 'DMZ',
'reactionner_tag': 'REAC'
})
assert c.command_name == 'check_command_test'
assert c.get_name() == 'check_command_test'
assert c.command_line == '/tmp/dummy_command.sh $ARG1$ $ARG2$'
assert c.poller_tag == 'DMZ'
assert c.reactionner_tag == 'REAC'
assert c.timeout == -1
assert c.module_type == 'nrpe-booster'
assert c.enable_environment_macros == False
b = c.get_initial_status_brok()
assert 'initial_command_status' == b.type
assert 'command_name' in b.data
assert 'command_line' in b.data
def test_commands_call(self):
""" Test commands call
:return: None
"""
c1 = Command({
'command_name': 'check_command_test1',
'command_line': '/tmp/dummy_command.sh $ARG1$ $ARG2$',
'module_type': 'nrpe-booster',
'poller_tag': 'DMZ',
'reactionner_tag': 'REAC'
})
c2 = Command({
'command_name': 'check_command_test2',
'command_line': '/tmp/dummy_command.sh $ARG1$ $ARG2$',
'module_type': 'nrpe-booster',
'poller_tag': 'DMZ',
'reactionner_tag': 'REAC'
})
# now create a commands list
cs = Commands([c1, c2])
# And a command call with commands (used on configuration parsing)
dummy_call = "check_command_test1!titi!toto"
cc = CommandCall({"commands": cs, "command_line": dummy_call}, parsing=True)
assert True == cc.is_valid()
# Got the command object matching the command line
assert c1 == cc.command
assert 'DMZ' == cc.poller_tag
assert 'REAC' == cc.reactionner_tag
|
class TestCommand(AlignakTest):
'''
This class tests the commands
'''
def setUp(self):
pass
def test_css_in_commands(self):
''' Test CSS and HTML in command '''
pass
def test_semi_colon_in_commands(self):
'''Test semi-colon in commands '''
pass
def test_spaces_in_commands(self):
'''Test spaces in commands
Service is defined as:
service_description svc_spaces
check_command check_snmp_int!public!"Nortel Ethernet Routing Switch 5530-24TFD
Module - Port 2 "!"90000,90000"!"120000,120000"
And command as:
command_name check_snmp_int
command_line $USER1$/check_snmp_int.pl -H $HOSTADDRESS$ -C $ARG1$ -n $ARG2$
-r -f -k -Y -B -w $ARG3$ -c $ARG4$
'''
pass
def test_command_no_parameters(self):
''' Test command without parameters
:return: None
'''
pass
def test_command_with_tags(self):
''' Test command with poller/reactionner tag
:return: None
'''
pass
def test_command_internal_host_up(self):
''' Test internal command _internal_host_up
:return: None
'''
pass
def test_command_internal_echo(self):
''' Test internal command _echo
:return: None
'''
pass
def test_command_build(self):
''' Test command build
:return: None
'''
pass
def test_commands_call(self):
''' Test commands call
:return: None
'''
pass
| 11 | 10 | 23 | 3 | 15 | 5 | 1 | 0.37 | 1 | 3 | 2 | 0 | 10 | 0 | 10 | 65 | 247 | 39 | 153 | 34 | 142 | 57 | 115 | 34 | 104 | 4 | 2 | 1 | 14 |
4,051 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_checks_modulations.py
|
tests.test_checks_modulations.TestCheckModulations
|
class TestCheckModulations(AlignakTest):
def setUp(self):
super(TestCheckModulations, self).setUp()
self.setup_with_file('./cfg/cfg_checks_modulations.cfg',
dispatching=True)
assert self.conf_is_correct
def test_checks_modulated_host_and_service(self):
""" Check modulation for an host and its service """
# Get the host
host = self._scheduler.hosts.find_by_name("modulated_host")
assert host is not None
assert host.check_command is not None
# Get the check modulation
mod = self._scheduler.checkmodulations.find_by_name("MODULATION")
assert mod is not None
assert mod.get_name() == "MODULATION"
# Modulation is known by the host
assert mod.uuid in host.checkmodulations
# Modulation check command is not the same as the host one
assert mod.get_check_command(self._scheduler.timeperiods, time.time()) is not host.check_command
# Get the host service
svc = self._scheduler.services.find_srv_by_name_and_hostname("modulated_host",
"modulated_service")
# Service is going CRITICAL/HARD ... this forces an host check!
self.scheduler_loop(1, [[svc, 2, 'BAD']])
assert len(host.checks_in_progress) == 1
for c in host.checks_in_progress:
assert 'plugins/nothing VALUE' == self._scheduler.checks[c].command
assert len(svc.checks_in_progress) == 1
for c in svc.checks_in_progress:
assert 'plugins/nothing VALUE' == self._scheduler.checks[c].command
|
class TestCheckModulations(AlignakTest):
def setUp(self):
pass
def test_checks_modulated_host_and_service(self):
''' Check modulation for an host and its service '''
pass
| 3 | 1 | 17 | 2 | 12 | 4 | 2 | 0.29 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 57 | 37 | 6 | 24 | 7 | 21 | 7 | 22 | 7 | 19 | 3 | 2 | 1 | 4 |
4,052 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_business_impact_modulation.py
|
tests.test_business_impact_modulation.TestBusinessImpactModulation
|
class TestBusinessImpactModulation(AlignakTest):
def setUp(self):
super(TestBusinessImpactModulation, self).setUp()
self.setup_with_file('cfg/cfg_businesssimpact_modulation.cfg',
dispatching=True)
assert self.conf_is_correct
def test_business_impact_modulation(self):
""" Tests business impact modulation """
# Get our scheduler BI modulations
bi_modulation = self._scheduler.businessimpactmodulations.find_by_name('CritMod')
assert bi_modulation is not None
assert bi_modulation.get_name() == "CritMod"
assert bi_modulation.business_impact == 5
# Get our service
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_00")
assert bi_modulation.uuid in svc.business_impact_modulations
# Service BI is defined as 2
assert svc.business_impact == 2
# Default scheduler loop updates the BI every 60 loop turns
# Update business impact on each scheduler tick
self._scheduler.update_recurrent_works_tick({'tick_update_business_values': 1})
self.scheduler_loop(2, [])
# Service BI is defined as 2 but the BI modulation makes it be 5!
assert svc.business_impact == 5
|
class TestBusinessImpactModulation(AlignakTest):
def setUp(self):
pass
def test_business_impact_modulation(self):
''' Tests business impact modulation '''
pass
| 3 | 1 | 13 | 1 | 8 | 4 | 1 | 0.41 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 57 | 28 | 4 | 17 | 5 | 14 | 7 | 16 | 5 | 13 | 1 | 2 | 0 | 2 |
4,053 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daterange.py
|
alignak.daterange.WeekDayDaterange
|
class WeekDayDaterange(Daterange):
"""WeekDayDaterange is for month week day entry (weekday offset - weekday offset)
"""
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for WeekDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
# If no year, it's our year
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Same for end year
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_weekday_offset(self.eyear, month_end_id, self.ewday,
self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
# Maybe end_time is before start. So look for the
# next month
if start_time > end_time:
month_end_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
# But maybe we look not enought far. We should add a month
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# First start
day_start = find_day_by_weekday_offset(self.syear,
month_start_id, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# Then end
day_end = find_day_by_weekday_offset(self.eyear,
month_end_id, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time)
|
class WeekDayDaterange(Daterange):
'''WeekDayDaterange is for month week day entry (weekday offset - weekday offset)
'''
def get_start_and_end_time(self, ref=None):
'''Specific function to get start time and end time for WeekDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
'''
pass
| 2 | 2 | 58 | 6 | 39 | 13 | 8 | 0.38 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 26 | 62 | 7 | 40 | 12 | 38 | 15 | 35 | 12 | 33 | 8 | 4 | 2 | 8 |
4,054 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_config.py
|
tests.test_config.TestConfig
|
class TestConfig(AlignakTest):
"""
This class tests the configuration
"""
def setUp(self):
print("Request: %s" % __file__)
super(TestConfig, self).setUp()
def test_config_ok(self):
""" Default shipped configuration has no loading problems ...
:return: None
"""
self.setup_with_file(configuration_file='../etc/alignak.cfg',
env_file='./etc/alignak.ini',
dispatching=False)
assert self.conf_is_correct
self.show_logs()
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
# Arbiter named as in the configuration
assert self._arbiter.conf.conf_is_correct
arbiter_link = self._arbiter.conf.arbiters.find_by_name('arbiter-master')
assert arbiter_link is not None
# # This will be verified only if the configuration is configured with `clean_objects=1`
# assert not hasattr(arbiter_link, 'configuration_errors')
# assert not hasattr(arbiter_link, 'configuration_warnings')
# Scheduler named as in the configuration
assert self._arbiter.conf.conf_is_correct
scheduler_link = self._arbiter.conf.schedulers.find_by_name('scheduler-master')
assert scheduler_link is not None
# # Scheduler configuration is ok
# assert self._scheduler.pushed_conf.conf_is_correct
# Broker, Poller, Reactionner named as in the configuration
link = self._arbiter.conf.brokers.find_by_name('broker-master')
assert link is not None
link = self._arbiter.conf.pollers.find_by_name('poller-master')
assert link is not None
link = self._arbiter.conf.reactionners.find_by_name('reactionner-master')
assert link is not None
# Receiver - no default receiver created
link = self._arbiter.conf.receivers.find_by_name('receiver-master')
assert link is not None
def test_config_ok_2(self):
""" Default shipped configuration has no loading problems ... even when using the
default shipped ini file
:return: None
"""
self.setup_with_file(configuration_file='../etc/alignak.cfg',
env_file='./etc/alignak.ini',
dispatching=True)
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
# Arbiter named as in the configuration
assert self._arbiter.conf.conf_is_correct
arbiter_link = self._arbiter.conf.arbiters.find_by_name('arbiter-master')
assert arbiter_link is not None
# # This will be verified only if the configuration is configured with `clean_objects=1`
# assert not hasattr(arbiter_link, 'configuration_errors')
# assert not hasattr(arbiter_link, 'configuration_warnings')
# Scheduler named as in the configuration
assert self._arbiter.conf.conf_is_correct
scheduler_link = self._arbiter.conf.schedulers.find_by_name('scheduler-master')
assert scheduler_link is not None
# Scheduler configuration is ok
assert self._scheduler.pushed_conf.conf_is_correct
# The scheduler got some hosts
for host in self._scheduler.pushed_conf.hosts:
print("Host: %s" % host)
assert len(self._scheduler.pushed_conf.hosts) == 48
# The arbiter prepared configuration
print("Prepared:")
for realm in self._arbiter.conf.realms:
print("-realm %s:" % realm.name)
# Some parts
assert realm.parts.values()
for cfg_part in list(realm.parts.values()):
print(" .%s (%s), flavor:%s, %s"
% (cfg_part.instance_id, cfg_part.uuid, cfg_part.push_flavor, cfg_part))
# Some contacts and templates
print(" -> contacts: %s" % cfg_part.contacts.__dict__)
assert cfg_part.contacts
# Some hosts and templates
print(" -> hosts: %s" % cfg_part.hosts.__dict__)
assert cfg_part.hosts
assert cfg_part.hosts.templates
# Some services and templates
print(" -> services: %s" % cfg_part.services.__dict__)
assert cfg_part.services
assert cfg_part.services.templates
# The scheduler received configuration
print("Received:")
# Some contacts and templates
print(" -> contacts: %s" % self._scheduler.pushed_conf.contacts.__dict__)
assert self._scheduler.pushed_conf.contacts
# Some hosts and templates
print(" -> hosts: %s" % self._scheduler.pushed_conf.hosts.__dict__)
assert self._scheduler.pushed_conf.hosts
assert self._scheduler.pushed_conf.hosts.templates
# Some services and templates
print(" -> services: %s" % self._scheduler.pushed_conf.services.__dict__)
assert self._scheduler.pushed_conf.services
assert self._scheduler.pushed_conf.services.templates
def test_config_test_ok(self):
""" Default test configuration has no loading problems ...
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg')
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
# Arbiter named as in the configuration
assert self._arbiter.conf.conf_is_correct
arbiter_link = self._arbiter.conf.arbiters.find_by_name('arbiter-master')
assert arbiter_link is not None
# # This will be verified only if the configuration is configured with `clean_objects=1`
# assert not hasattr(arbiter_link, 'configuration_errors')
# assert not hasattr(arbiter_link, 'configuration_warnings')
# Scheduler named as in the configuration
assert self._arbiter.conf.conf_is_correct
scheduler_link = self._arbiter.conf.schedulers.find_by_name('scheduler-master')
assert scheduler_link is not None
# # Scheduler configuration is ok
# assert self._scheduler.pushed_conf.conf_is_correct
# Broker, Poller, Reactionner and Receiver named as in the configuration
link = self._arbiter.conf.brokers.find_by_name('broker-master')
assert link is not None
link = self._arbiter.conf.pollers.find_by_name('poller-master')
assert link is not None
link = self._arbiter.conf.reactionners.find_by_name('reactionner-master')
assert link is not None
link = self._arbiter.conf.receivers.find_by_name('receiver-master')
assert link is not None
def test_host_name_pattern(self):
""" Default test configuration has no loading problems ...
:return: None
"""
self.setup_with_file('cfg/config/host_name_pattern.cfg')
assert self.conf_is_correct
self.show_logs()
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
# Search hosts by name
# From a patterned host: test_[0-2], we have test_0, test_1 and test_2
host = self._arbiter.conf.hosts.find_by_name('test_0')
assert host is not None
host = self._arbiter.conf.hosts.find_by_name('test_1')
assert host is not None
host = self._arbiter.conf.hosts.find_by_name('test_2')
assert host is not None
# From a patterned host: test_[0-2-%02d], we have test_00, test_01 and test_02
host = self._arbiter.conf.hosts.find_by_name('test_00')
assert host is not None
host = self._arbiter.conf.hosts.find_by_name('test_01')
assert host is not None
host = self._arbiter.conf.hosts.find_by_name('test_02')
assert host is not None
host = self._arbiter.conf.hosts.find_by_name('test_03')
assert host is None
def test_config_conf_inner_properties(self):
""" Default configuration has no loading problems ...
and inner default properties are correctly valued
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg')
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
# Arbiter configuration is correct
assert self._arbiter.conf.conf_is_correct
# Configuration inner properties are valued
assert self._arbiter.conf.main_config_file == os.path.abspath(
os.path.join(self._test_dir, 'cfg/cfg_default.cfg'))
# Default Alignak name is the arbiter name but it may be set from the configuration
assert self._arbiter.conf.alignak_name == 'My Alignak'
assert self._arbiter.alignak_name == 'My Alignak'
# Default Alignak daemons start/stop configuration
# assert self._arbiter.conf.daemons_start_timeout == 1
# Changed to 5 seconds for tests purpose
assert self._arbiter.conf.daemons_start_timeout == 1
assert self._arbiter.conf.daemons_stop_timeout == 5
def test_config_conf_inner_properties_named_alignak(self):
""" Default configuration with an alignak_name property
:return: None
"""
self.setup_with_file('cfg/cfg_default_alignak_name.cfg',
dispatching=True)
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
# Arbiter configuration is correct
assert self._arbiter.conf.conf_is_correct
# Alignak name is defined in the configuration (from the Nagios legacy)
assert self._arbiter.conf.alignak_name == 'my_alignak'
# Alignak name is defined in the arbiter (from the ini configuration file or
# from the command line)
# The value defined in the Cfg files takes precedence over the one in alignak.ini!
# assert self._arbiter.alignak_name == 'My Alignak'
assert self._arbiter.alignak_name == 'my_alignak'
# Alignak name is defined in the configuration dispatched to the schedulers
assert len(self._arbiter.dispatcher.schedulers) == 1
for scheduler in self._arbiter.dispatcher.schedulers:
assert 'alignak_name' in scheduler.cfg
assert scheduler.cfg.get('alignak_name') == 'my_alignak'
# Alignak name is defined in the configuration dispatched to the satellites
assert len(self._arbiter.dispatcher.satellites) == 4
for satellite in self._arbiter.dispatcher.satellites:
assert 'alignak_name' in satellite.cfg
assert satellite.cfg.get('alignak_name') == 'my_alignak'
def test_config_ok_no_declared_daemons(self):
""" Default configuration has no loading problems ... but no daemons are defined
The arbiter will create default daemons except for the receiver.
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg', 'cfg/config/alignak-no-daemons.ini')
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
# Arbiter named as Default
assert self._arbiter.conf.conf_is_correct
# Use the generic daemon name in the alignak.ini file!
arbiter_link = self._arbiter.conf.arbiters.find_by_name('Default-Arbiter')
for arb in self._arbiter.conf.arbiters:
print("Arbiters: %s" % arb.name)
assert arbiter_link is not None
# # This will be verified only if the configuration is configured with `clean_objects=1`
# assert not hasattr(arbiter_link, 'configuration_errors')
# assert not hasattr(arbiter_link, 'configuration_warnings')
# Scheduler named as Default
link = self._arbiter.conf.schedulers.find_by_name('Default-Scheduler')
assert link is not None
# # Scheduler configuration is ok
# assert self._schedulers['Default-Scheduler'].pushed_conf.conf_is_correct
# Broker, Poller, Reactionner and Receiver named as Default
link = self._arbiter.conf.brokers.find_by_name('Default-Broker')
assert link is not None
link = self._arbiter.conf.pollers.find_by_name('Default-Poller')
assert link is not None
link = self._arbiter.conf.reactionners.find_by_name('Default-Reactionner')
assert link is not None
link = self._arbiter.conf.receivers.find_by_name('Default-Receiver')
assert link is not None
def test_symlinks(self):
""" Test a configuration with symlinks to files
:return: None
"""
if os.name == 'nt':
return
self.setup_with_file('cfg/conf_in_symlinks/alignak_conf_in_symlinks.cfg')
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_0",
"test_HIDDEN")
assert svc is not None
def test_define_syntax(self):
""" Test that define{} syntax is correctly checked: spaces, multi-lines, white-spaces
do not raise any error ...
:return: None
"""
self.setup_with_file('cfg/config/alignak_define_with_space.cfg')
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
host = self._arbiter.conf.hosts.find_by_name('spaced-host')
assert host is not None
def test_plus_syntax(self):
""" Test that plus (+) is not allowed for single value properties
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/config/host_bad_plus_syntax.cfg')
self.show_logs()
assert not self.conf_is_correct
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_1] Configuration is incorrect"
))
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_1] A + value for a single string (display_name) is not handled"
))
self.assert_any_cfg_log_match(re.escape(
"hosts configuration is incorrect!"
))
assert len(self.configuration_errors) == 3
assert len(self.configuration_warnings) == 1
def test_underscore_syntax(self):
""" Test that underscore (_) is not allowed for list value properties
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/config/host_macro_is_a_list.cfg')
self.show_logs()
assert not self.conf_is_correct
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_1] Configuration is incorrect"
))
self.assert_any_cfg_log_match(re.escape(
"[host::test_host_1] A + value for a single string (_macro_list_plus) is not handled"
))
self.assert_any_cfg_log_match(re.escape(
"hosts configuration is incorrect!"
))
assert len(self.configuration_errors) == 3
assert len(self.configuration_warnings) == 1
def test_definition_order(self):
""" Test element definition order
An element (host, service, ...) can be defined several times then the definition_order
will be used to choose which definition is the to be used one...
Here, the 'same_service' is defined 3 times but the 'general1' command one will be
retained rather than other because have the lower definition_order ...
:return: None
"""
self.setup_with_file('cfg/config/alignak_definition_order.cfg')
assert self.conf_is_correct
# No error messages
assert len(self.configuration_errors) == 0
# No warning messages
assert len(self.configuration_warnings) == 0
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"myhost", "same_service")
assert svc is not None
assert 'general1' == svc.check_command.command.command_name
assert 1 == svc.definition_order
def test_service_not_hostname(self):
""" Test the 'not hostname' syntax
The service test_ok_0 is applied with a host_group on "test_host_0","test_host_1"
but have a host_name with !"test_host_1" so it will only be attached to "test_host_0"
:return: None
"""
self.setup_with_file('cfg/config/alignak_service_not_hostname.cfg')
assert self.conf_is_correct
host = self._arbiter.conf.hosts.find_by_name("test_host_0")
assert host is not None
assert host.is_correct()
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"test_host_0", "test_ok_0")
# Check that the service is attached to test_host_0
assert svc is not None
assert svc.is_correct()
# Check that the service is NOT attached to test_host_1
svc_not = self._arbiter.conf.services.find_srv_by_name_and_hostname(
"test_host_1", "test_ok_0")
assert svc_not is None
def test_service_inheritance(self):
""" Test services inheritance
Services are attached to hosts thanks to template inheritance
SSH services are created from a template and attached to an host
svc_inherited is created from a service template linked to an host template with a simple
host declaration
:return: None
"""
self.setup_with_file('cfg/config/alignak_service_description_inheritance.cfg')
assert self.conf_is_correct
# self._sched = self._scheduler
# Service linked to an host
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("MYHOST", "SSH")
assert svc is not None
# Service linked to several hosts
for hname in ["MYHOST2", "MYHOST3"]:
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(hname, "SSH")
assert svc is not None
# ---
# Test services created because service template linked to host template
# An host
host = self._arbiter.conf.hosts.find_by_name("test_host")
assert host is not None
for service in host.services:
if service in self._arbiter.conf.services:
print("Host service: %s" % (self._arbiter.conf.services[service]))
assert len(host.services) == 3
# Service template linked to an host template
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host", "svc_inherited")
assert svc is not None
assert svc.uuid in host.services
assert 'check_ssh' == svc.check_command.command.command_name
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host", "svc_inherited2")
assert svc is not None
assert svc.uuid in host.services
assert 'check_ssh' == svc.check_command.command.command_name
# Another host
host = self._arbiter.conf.hosts.find_by_name("test_host2")
assert host is not None
for service in host.services:
if service in self._arbiter.conf.services:
print(("Host service: %s" % (self._arbiter.conf.services[service])))
assert len(host.services) == 3
# Service template linked to an host template
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host2", "svc_inherited")
assert svc is not None
assert svc.uuid in host.services
assert 'check_ssh' == svc.check_command.command.command_name
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host2", "svc_inherited2")
assert svc is not None
assert svc.uuid in host.services
assert 'check_ssh' == svc.check_command.command.command_name
def test_service_templating_inheritance(self):
""" Test services inheritance
Services are attached to hosts thanks to host/service template relation
:return: None
"""
self.setup_with_file('cfg/config/alignak_service_description_inheritance.cfg')
assert self.conf_is_correct
# An host
host = self._arbiter.conf.hosts.find_by_name("test.host.A")
assert host is not None
# Service linked to hist host
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test.host.A", "nsca_uptime")
assert svc is not None
svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test.host.A", "nsca_cpu")
assert svc is not None
def test_service_with_no_host(self):
""" A service not linked to any host raises an error
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/config/alignak_service_nohost.cfg')
assert not self.conf_is_correct
# assert "Configuration in service::will_not_exist is incorrect; " \
# "from: cfg/config/alignak_service_nohost.cfg:1" in \
# self.configuration_errors
# assert "a service has been defined without host_name nor " \
# "hostgroup_name, from: cfg/config/alignak_service_nohost.cfg:1" in \
# self.configuration_errors
# assert "[service::will_not_exist] not bound to any host." in \
# self.configuration_errors
# assert "[service::will_not_exist] no check_command" in \
# self.configuration_errors
self.assert_any_cfg_log_match(re.escape(
"[service::will_error] Configuration is incorrect; "
))
self.assert_any_cfg_log_match(re.escape(
"[service::will_error] unknown host_name 'NOEXIST'"
))
# self.assert_any_cfg_log_match(re.escape(
# "[service::will_error] check_command 'None' invalid"
# ))
self.assert_any_cfg_log_match(re.escape(
"services configuration is incorrect!"
))
# Only one service in the loaded configuration
print("Services list: %s" % self._arbiter.conf.services)
assert len(self._arbiter.conf.services.items) == 1
def test_bad_template_use_itself(self):
""" Detect a template that uses itself as a template
This test host use template but template is itself
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/cfg_bad_host_template_itself.cfg')
self.show_logs()
assert not self.conf_is_correct
# Note the double item [arb...] [host::bla] ...
self.assert_any_cfg_log_match(re.escape(
"[host::bla] use/inherits from itself; from: "
))
def test_use_undefined_template(self):
""" Test unknown template detection for host and service
:return: None
"""
self.setup_with_file('cfg/cfg_bad_undefined_template.cfg')
self.show_logs()
assert self.conf_is_correct
# TODO, issue #344
self.assert_any_cfg_log_match(re.escape(
"[host::test_host] use/inherit from an unknown template: undefined_host"
))
self.assert_any_cfg_log_match(re.escape(
"[service::test_service] use/inherit from an unknown template: undefined_service"
))
def test_broken_configuration(self):
""" Configuration is not correct because of a wrong relative path in the main config file
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/config/alignak_broken_1.cfg')
assert not self.conf_is_correct
# Error messages
assert len(self.configuration_errors) == 2
u = 'u' if os.sys.version_info[:2] < (3, 0) else ''
cwd = self._test_dir
self.assert_any_cfg_log_match(
re.escape(
"cannot open file '%s/cfg/config/etc/broken_1/minimal.cfg' "
"for reading: [Errno 2] No such file or directory: "
"%s'%s/cfg/config/etc/broken_1/minimal.cfg'" % (cwd, u, cwd)
)
)
self.assert_any_cfg_log_match(
re.escape(
"cannot open file '%s/cfg/config/resource.cfg' "
"for reading: [Errno 2] No such file or directory: "
"%s'%s/cfg/config/resource.cfg'" % (cwd, u, cwd)
)
)
def test_malformed_parameters(self):
    """A global parameter without any '=' sign makes the configuration invalid.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/bad_parameters_syntax.cfg')
    assert not self.conf_is_correct
    self.show_logs()

    # A single error is raised, for the parameter missing its = sign
    assert len(self.configuration_errors) == 1
    self.assert_any_cfg_log_match(re.escape(
        "the parameter parameter2 is malformed! (no = sign)"
    ))
def test_nagios_parameters(self):
    """Old Nagios parameters only raise deprecation warnings, not errors.

    :return: None
    """
    self.setup_with_file('cfg/config/deprecated_configuration.cfg')
    self.show_logs()

    # No errors, only deprecation warnings
    assert len(self.configuration_errors) == 0
    assert len(self.configuration_warnings) == 11

    for message in [
        "The configuration parameters 'status_file = /tmp/status' and "
        "'object_cache_file = /tmp/cache' are deprecated and will be ignored. "
        "Please configure your external 'retention' module as expected.",

        "The configuration parameter 'log_file = /tmp/log' is deprecated and will be ignored. "
        "Please configure your external 'logs' module as expected.",

        "The configuration parameter 'use_syslog = True' is deprecated and will be ignored. "
        "Please configure your external 'logs' module as expected.",

        "The configuration parameter 'command_file = /tmp/command' is deprecated and will "
        "be ignored. "
        "Please configure an external commands capable module as expected "
        "(eg external-commands, NSCA, or WS module may suit.",

        "use_regexp_matching parameter is not managed.",
        "ochp_command parameter is not managed.",
        "obsess_over_hosts parameter is not managed.",
        "ocsp_command parameter is not managed.",
        "obsess_over_services parameter is not managed."
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_nagios_parameters_2(self):
    """Old Nagios parameters that only raise warnings keep the configuration correct.

    :return: None
    """
    self.setup_with_file('cfg/config/deprecated_configuration_warning.cfg')
    assert self.conf_is_correct
    self.show_logs()

    # No error messages, only warnings
    assert len(self.configuration_errors) == 0
    assert len(self.configuration_warnings) == 6

    unmanaged = ('failure_prediction_enabled', 'use_regexp_matching', 'ochp_command',
                 'obsess_over_hosts', 'ocsp_command', 'obsess_over_services')
    for parameter in unmanaged:
        self.assert_any_cfg_log_match(re.escape(
            "%s parameter is not managed." % parameter
        ))
def test_broken_configuration_2(self):
    """A non-existing directory in the configuration breaks the load.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/alignak_broken_2.cfg')
    assert not self.conf_is_correct

    # Two error messages: the missing directory and the missing resource file
    assert len(self.configuration_errors) == 2
    prefix = 'u' if os.sys.version_info[:2] < (3, 0) else ''
    cwd = self._test_dir
    self.assert_any_cfg_log_match(re.escape(
        u"cannot open directory '%s/cfg/config/not-existing-dir' for reading" % cwd
    ))
    self.assert_any_cfg_log_match(re.escape(
        "cannot open file '%s/cfg/config/resource.cfg' for reading: "
        "[Errno 2] No such file or directory: %s'%s/cfg/config/resource.cfg'"
        % (cwd, prefix, cwd)
    ))
def test_bad_timeperiod(self):
    """Invalid timeperiod entries/dateranges are detected as errors.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/alignak_bad_timeperiods.cfg')
    assert not self.conf_is_correct

    self.assert_any_cfg_log_match(re.escape(
        "[timeperiod::24x7_bad2] invalid entry 'satourday 00:00-24:00'"
    ))
    self.assert_any_cfg_log_match(re.escape(
        "[timeperiod::24x7_bad] invalid daterange"
    ))

    # Only the well-formed timeperiod remains correct
    for tp_name, expected in (("24x7", True), ("24x7_bad", False), ("24x7_bad2", False)):
        timeperiod = self._arbiter.conf.timeperiods.find_by_name(tp_name)
        assert expected == timeperiod.is_correct()
def test_bad_contact(self):
    """A service declaring an unknown contact raises a configuration error.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/cfg_bad_contact_in_service.cfg')
    assert not self.conf_is_correct

    # The service got an unknown contact, so it cannot be correct
    svc = self._arbiter.conf.services.find_srv_by_name_and_hostname("test_host_0",
                                                                    "test_ok_0_badcon")
    print("Svc:", svc)
    print("Contacts:", svc.contacts)
    assert not svc.is_correct()

    for message in [
        "[service::test_ok_0_badcon] Configuration is incorrect; from: ",
        "[service::test_ok_0_badcon] the contact 'IDONOTEXIST' defined for 'test_ok_0_badcon' is unknown"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_bad_notification_period(self):
    """A service declaring an unknown notification_period raises an error.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/cfg_bad_notificationperiod_in_service.cfg')
    assert not self.conf_is_correct
    self.show_configuration_logs()

    for message in [
        "[service::test_ok_0_badperiod] Configuration is incorrect; from:",
        "[service::test_ok_0_badperiod] The notification_period named 'IDONOTEXIST' is unknown"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_bad_realm_conf(self):
    """Unknown realm member in a realm and unknown realm in a host raise errors.

    This test does not always pass! This problem is due to the unordered
    configuration reading: sometimes the hosts are parsed before the realms
    and sometimes the realms are parsed before the hosts. According to the
    detection order, the reported error messages differ. To avoid this, the
    realm containing an unknown member must not be used by any host :)

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/cfg_bad_realm_member.cfg')
    assert not self.conf_is_correct
    self.show_logs()

    # Configuration warnings: for each realm that has hosts but no satellite,
    # a default satellite of each type is automatically created
    satellites = [('poller', 7771), ('broker', 7772), ('reactionner', 7769),
                  ('receiver', 7773), ('scheduler', 7768)]
    for sat_type, sat_port in satellites:
        for realm in ('Realm1', 'Realm2'):
            self.assert_any_cfg_log_match(re.escape(
                "Some hosts exist in the realm '%s' but no %s is defined for this realm."
                % (realm, sat_type)))
            self.assert_any_cfg_log_match(re.escape(
                "Added a %s (%s-%s, http://127.0.0.1:%d/) for the realm '%s'"
                % (sat_type, sat_type, realm, sat_port, realm)))
    self.assert_any_cfg_log_match(re.escape(
        "More than one realm is defined as the default one: "
        "All,Realm1,Realm2,Realm4. I set All as the default realm."))

    # Configuration errors
    for message in [
        "The host 'test_host_realm3' is affected to an unknown realm: 'Realm3'",
        "[realm::Realm4] as realm, got unknown member 'UNKNOWN_REALM'",
        "[realm::Realm4] Configuration is incorrect; from:",
        "realms configuration is incorrect!",
        "Error: the realm configuration of your hosts is not correct because "
        "there is more than one realm in one pack (host relations):",
        " -> the host test_host_realm1 is in the realm Realm1",
        " -> the host test_host_realm3 is in the realm Realm3",
        " -> the host test_host_realm2 is in the realm Realm2",
        "There are 6 hosts defined, and 3 hosts dispatched in the realms. "
        "Some hosts have been ignored"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_business_rules_incorrect(self):
    """Business rules referencing non-existing hosts/services must make the
    arbiter output an error message and exit in a controlled manner.
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/business_correlator_broken.cfg')
    assert not self.conf_is_correct
    self.show_configuration_logs()

    for message in [
        "[service::Simple_1Of_1unk_host] Configuration is incorrect; ",
        "[service::Simple_1Of_1unk_host] business_rule invalid",
        "[service::Simple_1Of_1unk_host] business rule uses unknown host test_host_9",

        "[service::Simple_1Of_1unk_svc] Configuration is incorrect; ",
        "[service::Simple_1Of_1unk_svc] business_rule invalid",
        "[service::Simple_1Of_1unk_svc] business rule uses unknown service test_host_0/db3",

        "[service::ERP_unk_svc] Configuration is incorrect; ",
        "[service::ERP_unk_svc] business_rule invalid",
        "[service::ERP_unk_svc] business rule uses unknown service test_host_0/web100",
        "[service::ERP_unk_svc] business rule uses unknown service test_host_0/lvs100",

        "services configuration is incorrect!"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_business_rules_hostgroup_expansion_errors(self):
    """A bad syntax in a business rule hostgroup expansion breaks the configuration."""
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/business_correlator_expand_expression_broken.cfg')
    assert not self.conf_is_correct
    self.show_configuration_logs()

    for message in [
        "[service::bprule_empty_regex] Configuration is incorrect; ",
        "[service::bprule_invalid_regex] business_rule invalid",
        "[service::bprule_invalid_regex] business rule uses invalid regex",

        "[service::bprule_empty_regex] Configuration is incorrect; ",
        "[service::bprule_empty_regex] business_rule invalid",
        "[service::bprule_empty_regex] business rule got an empty result "
        "for pattern 'r:fake,srv1'",

        "[service::bprule_unkonwn_service] Configuration is incorrect; ",
        "[service::bprule_unkonwn_service] business_rule invalid",
        "[service::bprule_unkonwn_service] business rule got an empty result "
        "for pattern 'g:hostgroup_01,srv3'",

        "[service::bprule_unkonwn_hostgroup] Configuration is incorrect; ",
        "[service::bprule_unkonwn_hostgroup] business_rule invalid",
        "[service::bprule_unkonwn_hostgroup] business rule got an empty result "
        "for pattern 'g:hostgroup_03,srv1'",

        "services configuration is incorrect!"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_business_rules_bad_realm_conf(self):
    """A bad realms configuration in the business rules breaks the configuration.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/business_rules_bad_realm_conf.cfg')
    assert not self.conf_is_correct
    self.show_configuration_logs()

    # Warnings: missing satellites in the hosts realms
    assert len(self.configuration_warnings) == 6
    for sat_type in ('poller', 'broker', 'reactionner', 'receiver', 'scheduler'):
        self.assert_any_cfg_log_match(re.escape(
            "Some hosts exist in the realm 'Realm2' but no %s is defined for this realm."
            % sat_type))
    self.assert_any_cfg_log_match(re.escape(
        "Some hosts exist in the realm 'Realm1' but no receiver is defined for this realm."
    ))

    # assert len(self.configuration_errors) == 9
    for message in [
        "[hostgroup::up] got the default realm but it has some hosts that are from different "
        "realms: ",
        "[hostgroup::up] Configuration is incorrect; from:",
        "[hostgroup::hostgroup_01] got the default realm but it has some hosts that are from "
        "different realms: ",
        "[hostgroup::hostgroup_01] Configuration is incorrect; from:",
        "hostgroups configuration is incorrect!",
        "Error: the realm configuration of your hosts is not correct because "
        "there is more than one realm in one pack (host relations):",
        " -> the host test_host_realm1 is in the realm Realm1",
        " -> the host test_host_realm2 is in the realm Realm2",
        "There are 4 hosts defined, and 2 hosts dispatched in the realms. "
        "Some hosts have been ignored"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_bad_satellite_realm_conf(self):
    """A daemon configured with an unknown realm breaks the configuration.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/cfg_default.cfg', 'cfg/config/alignak-bad-realms.ini')
    self.show_logs()
    assert not self.conf_is_correct
    self.show_configuration_logs()

    # Each daemon is affected to an unknown realm ...
    for sat_type in ('poller', 'broker', 'reactionner', 'receiver', 'scheduler'):
        self.assert_any_cfg_log_match(
            "The %s '%s-master' is affected to an unknown realm: '" % (sat_type, sat_type))
    self.assert_any_cfg_log_match(
        "The realm All has 2 hosts but no scheduler!")
    # ... so the All realm, which has hosts, misses every satellite type
    for sat_type in ('poller', 'broker', 'reactionner', 'receiver', 'scheduler'):
        self.assert_any_cfg_log_match(
            "Some hosts exist in the realm 'All' but no %s is defined for this realm."
            % sat_type)
def test_bad_service_interval(self):
    """A non-numeric check_interval in a service raises a configuration error.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/cfg_bad_check_interval_in_service.cfg')
    assert not self.conf_is_correct
    self.show_configuration_logs()

    for message in [
        "[service::fake svc1] Configuration is incorrect; from:",
        "[service::fake svc1] Error while pythonizing parameter 'check_interval': "
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
def test_config_contacts(self):
    """The default configuration contacts are loaded with their properties and customs.

    :return: None
    """
    self.setup_with_file('cfg/cfg_default.cfg')
    assert self.conf_is_correct

    contact = self._arbiter.conf.contacts.find_by_name('test_contact')
    assert 'test_contact' == contact.contact_name
    assert 'nobody@localhost' == contact.email
    # Custom variables are stored upper-cased with a leading underscore
    assert {'_VAR1': '10', '_VAR2': 'text'} == contact.customs
def test_config_hosts(self):
    """Hosts initial states are set from their configured initial_state.

    :return: None
    """
    self.setup_with_file('cfg/config/host_config_all.cfg')
    assert self.conf_is_correct

    expected_states = {
        'test_host_000': 'DOWN',
        'test_host_001': 'UNREACHABLE',
        'test_host_002': 'UP',
        'test_host_003': 'UP',
    }
    for host_name, state in expected_states.items():
        host = self._arbiter.conf.hosts.find_by_name(host_name)
        assert state == host.state
def test_config_hosts_names(self):
    """ Test hosts allowed hosts names:
    - Check that it is allowed to have a host with the "__ANTI-VIRG__"
    substring in its hostname
    - Check that the semicolon is a comment delimiter
    - Check that it is possible to have a host with a semicolon in its hostname:
    The consequences of this aren't tested. We try just to send a command but
    other programs which send commands probably don't escape the semicolon.
    :return: None
    """
    # dispatching=True: the configuration is pushed to the (test) scheduler
    self.setup_with_file('cfg/config/alignak_antivirg.cfg', dispatching=True)
    assert self.conf_is_correct, "Configuration is not valid"

    # host test_host_0 - check it is known both by the arbiter and the scheduler
    host = self._arbiter.conf.hosts.find_by_name('test_host_0')
    print("Host: %s / %s" % (host.get_name(), host.__dict__))
    host = self._scheduler.pushed_conf.hosts.find_by_name('test_host_0')
    print("Host: %s / %s" % (host.get_name(), host.__dict__))

    # try to get the host
    # if it is not possible to get the host, it is probably because
    # "__ANTI-VIRG__" has been replaced by ";"
    hst = self._arbiter.conf.hosts.find_by_name('test__ANTI-VIRG___0')
    assert hst is not None, "host 'test__ANTI-VIRG___0' not found"
    assert hst.is_correct(), "config of host '%s' is not correct" % hst.get_name()

    # try to get the host - the one declared after the ';' comment
    hst = self._arbiter.conf.hosts.find_by_name('test_host_1')
    assert hst is not None, "host 'test_host_1' not found"
    assert hst.is_correct(), "config of host '%s' is not true" % (hst.get_name())

    # try to get the host - semicolon kept in the host name thanks to escaping
    hst = self._arbiter.conf.hosts.find_by_name('test_host_2;with_semicolon')
    assert hst is not None, "host 'test_host_2;with_semicolon' not found"
    assert hst.is_correct(), "config of host '%s' is not true" % hst.get_name()

    host = self._arbiter.conf.hosts.find_by_name("test_host_2;with_semicolon")
    assert host is not None, "host 'test_host_2;with_semicolon' not found"
    # This host has no defined check_command, then it will always keep its initial state!
    assert host.initial_state == 'd'
    assert 'DOWN' == host.state

    # We can also send a command by escaping the semicolon.
    command = r'[%lu] PROCESS_HOST_CHECK_RESULT;test_host_2\;with_semicolon;0;I should be up' \
              % (time.time())
    self._scheduler.run_external_commands([command])
    self.external_command_loop()
    # The state is unchanged: with no check_command, the initial state sticks
    assert 'DOWN' == host.state
def test_config_hosts_default_check_command(self):
    """An host without a declared command uses the default _internal_host_up.

    :return: None
    """
    self.setup_with_file('cfg/config/hosts_commands.cfg')
    self.show_logs()
    assert self.conf_is_correct

    # Neither errors nor warnings are raised
    assert len(self.configuration_errors) == 0
    assert len(self.configuration_warnings) == 0

    # The default internal command exists ...
    command = self._arbiter.conf.commands.find_by_name('_internal_host_up')
    print("Command: %s" % command)
    assert command

    # ... and the host itself got no check command
    host = self._arbiter.conf.hosts.find_by_name('test_host')
    assert host.check_command is None
def test_config_services(self):
    """Services initial states are set from their configured initial_state.

    :return: None
    """
    self.setup_with_file('cfg/config/service_config_all.cfg')

    expected_states = {
        'test_service_0': 'WARNING',
        'test_service_1': 'UNKNOWN',
        'test_service_2': 'CRITICAL',
        'test_service_3': 'OK',
        'test_service_4': 'OK',
    }
    for svc_description, state in expected_states.items():
        svc = self._arbiter.conf.services.find_srv_by_name_and_hostname(
            'test_host_0', svc_description)
        assert state == svc.state
def test_host_unreachable_values(self):
    """The unreachable ('x') value is accepted in:
    * flap_detection_options
    * notification_options
    * snapshot_criteria

    :return: None
    """
    self.setup_with_file('cfg/config/host_unreachable.cfg')
    assert self.conf_is_correct
    # Neither errors nor warnings are raised
    assert len(self.configuration_errors) == 0
    assert len(self.configuration_warnings) == 0

    host0 = self._arbiter.conf.hosts.find_by_name('host_A')
    host1 = self._arbiter.conf.hosts.find_by_name('host_B')

    # All the options, including 'x', are in the notification options
    assert 5 == len(host0.notification_options)
    for option in ('x', 's', 'r', 'd', 'f'):
        assert option in host0.notification_options

    assert ['o', 'd', 'x'] == host0.flap_detection_options
    assert ['d', 'x'] == host0.snapshot_criteria

    # host_B depends upon host_A, with 'x' in the dependency criteria
    assert 1 == len(host0.act_depend_of_me)
    assert ['d', 'x'] == host0.act_depend_of_me[0][1]
    assert 1 == len(host0.chk_depend_of_me)
    assert ['x'] == host0.chk_depend_of_me[0][1]

    assert 1 == len(host1.act_depend_of)
    assert ['d', 'x'] == host1.act_depend_of[0][1]
    assert 1 == len(host1.chk_depend_of)
    assert ['x'] == host1.chk_depend_of[0][1]
def test_macro_modulation(self):
    """Broken macro modulations (no macro, no name) raise configuration errors.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/macros_modulation_broken.cfg')
    assert not self.conf_is_correct

    for message in [
        # A modulation without any macro definition
        "[macromodulation::MODULATION2] Configuration is incorrect; ",
        "[macromodulation::MODULATION2] The modulation_period named '24x7' is unknown",
        "[macromodulation::MODULATION2] contains no macro definition",
        # A modulation without a name
        "[macromodulation::Unnamed] Configuration is incorrect; ",
        "[macromodulation::Unnamed] The modulation_period named '24x7' is unknown",
        "[macromodulation::Unnamed] macromodulation_name required property is missing"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))

    self.assert_any_cfg_log_match(
        "macromodulations configuration is incorrect!"
    )
def test_checks_modulation(self):
    """Broken checks modulations (no command, no name) raise configuration errors.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/checks_modulation_broken.cfg')
    assert not self.conf_is_correct

    for message in [
        # A modulation without any check_command definition
        "[checkmodulation::MODULATION] Configuration is incorrect; ",
        "[checkmodulation::MODULATION] a check_command is missing",
        # A modulation without a name
        "[checkmodulation::Unnamed] Configuration is incorrect; ",
        "[checkmodulation::Unnamed] The check_period named '24x7' is unknown",
        "[checkmodulation::Unnamed] checkmodulation_name required property is missing"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))

    self.assert_any_cfg_log_match(
        "checkmodulations configuration is incorrect!"
    )
def test_business_impact__modulation(self):
    """Broken business impact modulations raise configuration errors.

    :return: None
    """
    with pytest.raises(SystemExit):
        self.setup_with_file('cfg/config/businesssimpact_modulation_broken.cfg')
    assert not self.conf_is_correct

    for message in [
        # A modulation without a business_impact definition
        "[businessimpactmodulation::CritMod] The modulation_period named '24x7' is unknown",
        "[businessimpactmodulation::CritMod] Configuration is incorrect; from: ",
        "[businessimpactmodulation::CritMod] business_impact required property is missing",
        # A modulation without a name
        "[businessimpactmodulation::Unnamed] The modulation_period named '24x7' is unknown",
        "[businessimpactmodulation::Unnamed] business_impact_modulation_name required property "
        "is missing",
        "[businessimpactmodulation::Unnamed] Configuration is incorrect; ",
        "businessimpactmodulations configuration is incorrect!"
    ]:
        self.assert_any_cfg_log_match(re.escape(message))
|
class TestConfig(AlignakTest):
'''
This class tests the configuration
'''
def setUp(self):
pass
def test_config_ok(self):
''' Default shipped configuration has no loading problems ...
:return: None
'''
pass
def test_config_ok_2(self):
''' Default shipped configuration has no loading problems ... even when using the
default shipped ini file
:return: None
'''
pass
def test_config_test_ok(self):
''' Default test configuration has no loading problems ...
:return: None
'''
pass
def test_host_name_pattern(self):
''' Default test configuration has no loading problems ...
:return: None
'''
pass
def test_config_conf_inner_properties(self):
''' Default configuration has no loading problems ...
and inner default properties are correctly valued
:return: None
'''
pass
def test_config_conf_inner_properties_named_alignak(self):
''' Default configuration with an alignak_name property
:return: None
'''
pass
def test_config_ok_no_declared_daemons(self):
''' Default configuration has no loading problems ... but no daemons are defined
The arbiter will create default daemons except for the receiver.
:return: None
'''
pass
def test_symlinks(self):
''' Test a configuration with symlinks to files
:return: None
'''
pass
def test_define_syntax(self):
''' Test that define{} syntax is correctly checked: spaces, multi-lines, white-spaces
do not raise any error ...
:return: None
'''
pass
def test_plus_syntax(self):
''' Test that plus (+) is not allowed for single value properties
:return: None
'''
pass
def test_underscore_syntax(self):
''' Test that underscore (_) is not allowed for list value properties
:return: None
'''
pass
def test_definition_order(self):
''' Test element definition order
An element (host, service, ...) can be defined several times then the definition_order
will be used to choose which definition is the to be used one...
Here, the 'same_service' is defined 3 times but the 'general1' command one will be
retained rather than other because have the lower definition_order ...
:return: None
'''
pass
def test_service_not_hostname(self):
''' Test the 'not hostname' syntax
The service test_ok_0 is applied with a host_group on "test_host_0","test_host_1"
but have a host_name with !"test_host_1" so it will only be attached to "test_host_0"
:return: None
'''
pass
def test_service_inheritance(self):
''' Test services inheritance
Services are attached to hosts thanks to template inheritance
SSH services are created from a template and attached to an host
svc_inherited is created from a service template linked to an host template with a simple
host declaration
:return: None
'''
pass
def test_service_templating_inheritance(self):
''' Test services inheritance
Services are attached to hosts thanks to host/service template relation
:return: None
'''
pass
def test_service_with_no_host(self):
''' A service not linked to any host raises an error
:return: None
'''
pass
def test_bad_template_use_itself(self):
''' Detect a template that uses itself as a template
This test host use template but template is itself
:return: None
'''
pass
def test_use_undefined_template(self):
''' Test unknown template detection for host and service
:return: None
'''
pass
def test_broken_configuration(self):
''' Configuration is not correct because of a wrong relative path in the main config file
:return: None
'''
pass
def test_malformed_parameters(self):
''' Configuration is not correct because of malformed parameters
:return: None
'''
pass
def test_nagios_parameters(self):
'''Configuration has some old nagios parameters
:return: None
'''
pass
def test_nagios_parameters_2(self):
'''Configuration has some old nagios parameters - some are not raising a configuration error
:return: None
'''
pass
def test_broken_configuration_2(self):
''' Configuration is not correct because of a non-existing path
:return: None
'''
pass
def test_bad_timeperiod(self):
''' Test bad timeperiod configuration
:return: None
'''
pass
def test_bad_contact(self):
''' Test a service with an unknown contact
:return: None
'''
pass
def test_bad_notification_period(self):
''' Configuration is not correct because of an unknown notification_period in a service
:return: None
'''
pass
def test_bad_realm_conf(self):
''' Configuration is not correct because of an unknown realm member in realm and
an unknown realm in a host
This test do not always pass! This problem is due to the unordered configuration reading.
Sometimes, the hosts are parsed before the realms and sometimes the realms are parsed
before the hosts.
According to the order in which errors are detected, the reported error messages are not
the same!
To avoid such a problem, the realm containing an unknown member for this test must
not be used in an host configuration :)
:return: None
'''
pass
def test_business_rules_incorrect(self):
''' Business rules use services which don't exist.
We want the arbiter to output an error message and exit
in a controlled manner.
'''
pass
def test_business_rules_hostgroup_expansion_errors(self):
''' Configuration is not correct because of a bad syntax in BR hostgroup expansion '''
pass
def test_business_rules_bad_realm_conf(self):
''' Configuration is not correct because of a bad configuration in business rules realms
:return: None
'''
pass
def test_bad_satellite_realm_conf(self):
''' Configuration is not correct because a daemon configuration has an unknown realm
:return: None
'''
pass
def test_bad_service_interval(self):
''' Configuration is not correct because of a bad check_interval in service
:return: None
'''
pass
def test_config_contacts(self):
''' Test contacts configuration
:return: None
'''
pass
def test_config_hosts(self):
''' Test hosts initial states
:return: None
'''
pass
def test_config_hosts_names(self):
''' Test hosts allowed hosts names:
- Check that it is allowed to have a host with the "__ANTI-VIRG__"
substring in its hostname
- Check that the semicolon is a comment delimiter
- Check that it is possible to have a host with a semicolon in its hostname:
The consequences of this aren't tested. We try just to send a command but
other programs which send commands probably don't escape the semicolon.
:return: None
'''
pass
def test_config_hosts_default_check_command(self):
''' Test hosts default check command
- Check that an host without declared command uses the default _internal_host_up
:return: None
'''
pass
def test_config_services(self):
''' Test services initial states
:return: None
'''
pass
def test_host_unreachable_values(self):
''' Test unreachable value in:
* flap_detection_options
* notification_options
* snapshot_criteria
:return: None
'''
pass
def test_macro_modulation(self):
''' Detect macro modulation configuration errors
:return: None
'''
pass
def test_checks_modulation(self):
''' Detect checks modulation configuration errors
:return: None
'''
pass
def test_business_impact__modulation(self):
''' Detect business impact modulation configuration errors
:return: None
'''
pass
| 43 | 42 | 32 | 4 | 21 | 8 | 1 | 0.37 | 1 | 3 | 0 | 0 | 42 | 0 | 42 | 97 | 1,402 | 207 | 870 | 88 | 827 | 325 | 581 | 88 | 538 | 6 | 2 | 2 | 56 |
4,055 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/dependencynode.py
|
alignak.dependencynode.DependencyNode
|
class DependencyNode(object):
    """
    DependencyNode is a node class for business_rule expression(s)

    A node is either a leaf ('host' / 'service' operand whose single son is the
    related item uuid) or an aggregation ('&', '|' or 'of:') of child nodes.
    """
    def __init__(self, params=None, parsing=False):  # pylint: disable=unused-argument
        # Operand: 'host', 'service', '&', '|' or 'of:'
        self.operand = None
        # Child DependencyNode objects, or the item uuid for a leaf node
        self.sons = []
        # Of: values are a triple OK,WARN,CRIT
        self.of_values = ('0', '0', '0')
        self.is_of_mul = False
        self.configuration_errors = []
        # True when this node is a NOT rule: its computed state is reverted
        self.not_value = False
        if params is not None:
            if 'operand' in params:
                self.operand = params['operand']
            if 'sons' in params:
                # Sons arrive serialized from the network: rebuild the objects
                self.sons = [unserialize(elem) for elem in params['sons']]
            # Of: values are a triple OK,WARN,CRIT
            if 'of_values' in params:
                self.of_values = tuple(params['of_values'])
            if 'is_of_mul' in params:
                self.is_of_mul = params['is_of_mul']
            if 'not_value' in params:
                self.not_value = params['not_value']

    def __repr__(self):  # pragma: no cover
        return '"Op:%s Val:%s Sons:[\'%s\'] IsNot:%s"' % (self.operand, self.of_values,
                                                          ','.join([str(s) for s in self.sons]),
                                                          self.not_value)
    __str__ = __repr__

    def serialize(self, no_json=True, printing=False):
        """This function serialize into a simple dict object.
        It is used when transferring data to other daemons over the network (http)

        Here we directly return all attributes except the sons that are serialized

        :return: json representation of a DependencyNode
        :rtype: dict
        """
        return {
            'operand': self.operand,
            'sons': [serialize(elem, no_json=no_json, printing=printing)
                     for elem in self.sons],
            'of_values': self.of_values,
            'is_of_mul': self.is_of_mul,
            'not_value': self.not_value
        }

    @staticmethod
    def get_reverse_state(state):
        """Do a symmetry around 1 of the state ::

        * 0 -> 2
        * 1 -> 1
        * 2 -> 0
        * else -> else

        :param state: state to reverse
        :type state: int
        :return: Integer from 0 to 2 (usually)
        :rtype: int
        """
        # Warning is still warning
        if state == 1:
            return 1
        if state == 0:
            return 2
        if state == 2:
            return 0
        # should not go here...
        return state

    def get_state(self, hosts, services):
        """Get node state by looking recursively over sons and applying operand

        :param hosts: list of available hosts to search for
        :param services: list of available services to search for
        :return: Node state
        :rtype: int
        """
        # If we are a host or a service, we just got the host/service
        # hard state
        if self.operand == 'host':
            host = hosts[self.sons[0]]
            return self.get_host_node_state(host.last_hard_state_id,
                                            host.problem_has_been_acknowledged,
                                            host.in_scheduled_downtime)

        if self.operand == 'service':
            service = services[self.sons[0]]
            return self.get_service_node_state(service.last_hard_state_id,
                                               service.problem_has_been_acknowledged,
                                               service.in_scheduled_downtime)

        if self.operand == '|':
            return self.get_complex_or_node_state(hosts, services)

        if self.operand == '&':
            return self.get_complex_and_node_state(hosts, services)

        # It's an Xof rule
        if self.operand == 'of:':
            return self.get_complex_xof_node_state(hosts, services)

        # We have an unknown node. Code is not reachable because we validate operands
        return 4

    def get_host_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
        """Get host node state, simplest case ::

        * Handle not value (revert) for host and consider 1 as 2

        :param state: host last hard state id
        :type state: int
        :param problem_has_been_acknowledged: host problem acknowledged?
        :type problem_has_been_acknowledged: bool
        :param in_scheduled_downtime: host in downtime?
        :type in_scheduled_downtime: bool
        :return: 0, 1 or 2
        :rtype: int
        """
        # Make DOWN look as CRITICAL (2 instead of 1)
        if state == 1:
            state = 2

        # If our node is acknowledged or in downtime, state is ok/up
        if problem_has_been_acknowledged or in_scheduled_downtime:
            state = 0

        # Maybe we are a NOT node, so manage this
        if self.not_value:
            return 0 if state else 2  # Keep the logic of return Down on NOT rules
        return state

    def get_service_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
        """Get service node state, simplest case ::

        * Handle not value (revert) for service

        :param state: service last hard state id
        :type state: int
        :param problem_has_been_acknowledged: service problem acknowledged?
        :type problem_has_been_acknowledged: bool
        :param in_scheduled_downtime: service in downtime?
        :type in_scheduled_downtime: bool
        :return: 0, 1 or 2
        :rtype: int
        """
        # If our node is acknowledged or in downtime, state is ok/up
        if problem_has_been_acknowledged or in_scheduled_downtime:
            state = 0

        # Maybe we are a NOT node, so manage this
        if self.not_value:
            # Critical -> OK
            if state == 2:
                return 0
            # OK -> CRITICAL (warning is untouched)
            if state == 0:
                return 2
        return state

    def get_complex_or_node_state(self, hosts, services):
        """Get state , handle OR aggregation ::

           * Get the best state (min of sons)
           * Revert if it's a not node

        :param hosts: host objects
        :param services: service objects
        :return: 0, 1 or 2
        :rtype: int
        """
        # First we get the state of all our sons
        states = [s.get_state(hosts, services) for s in self.sons]

        # Next we calculate the best state
        best_state = min(states)

        # Then we handle eventual not value
        if self.not_value:
            return self.get_reverse_state(best_state)
        return best_state

    def get_complex_and_node_state(self, hosts, services):
        """Get state , handle AND aggregation ::

           * Get the worst state. 2 or max of sons (3 <=> UNKNOWN < CRITICAL <=> 2)
           * Revert if it's a not node

        :param hosts: host objects
        :param services: service objects
        :return: 0, 1 or 2
        :rtype: int
        """
        # First we get the state of all our sons
        states = [s.get_state(hosts, services) for s in self.sons]

        # Next we calculate the worst state
        # CRITICAL (2) wins over UNKNOWN (3), hence the explicit test
        if 2 in states:
            worst_state = 2
        else:
            worst_state = max(states)

        # Then we handle eventual not value
        if self.not_value:
            return self.get_reverse_state(worst_state)
        return worst_state

    def get_complex_xof_node_state(self, hosts, services):
        # pylint: disable=too-many-locals, too-many-return-statements, too-many-branches
        """Get state , handle X of aggregation ::

           * Count the number of OK, WARNING, CRITICAL
           * Try too apply, in this order, Critical, Warning, OK rule
           * Return the code for first match (2, 1, 0)
           * If no rule apply, return OK for simple X of and worst state for multiple X of

        :param hosts: host objects
        :param services: service objects
        :return: 0, 1 or 2
        :rtype: int

        TODO: Looks like the last if does the opposite of what the comment says
        """
        # First we get the state of all our sons
        states = [s.get_state(hosts, services) for s in self.sons]

        # We search for OK, WARNING or CRITICAL applications
        # And we will choice between them
        # Each threshold may be an absolute count (possibly negative, meaning
        # "all but N") or a percentage ending with '%'
        nb_search_ok = self.of_values[0]
        nb_search_warn = self.of_values[1]
        nb_search_crit = self.of_values[2]

        # We look for each application
        nb_sons = len(states)
        nb_ok = nb_warn = nb_crit = 0
        for state in states:
            if state == 0:
                nb_ok += 1
            elif state == 1:
                nb_warn += 1
            elif state == 2:
                nb_crit += 1

        def get_state_for(nb_tot, nb_real, nb_search):
            """Check if there is enough value to apply this rule

            :param nb_tot: total number of value
            :type nb_tot: int
            :param nb_real: number of value that apply for this rule
            :type nb_real: int
            :param nb_search: max value to apply this rule (can be a percent)
            :type nb_search: int
            :return: True if the rule is effective (roughly nb_real > nb_search), False otherwise
            :rtype: bool
            """
            if nb_search.endswith('%'):
                nb_search = int(nb_search[:-1])
                if nb_search < 0:
                    # nb_search is negative, so +
                    nb_search = max(100 + nb_search, 0)
                apply_for = float(nb_real) / nb_tot * 100 >= nb_search
            else:
                nb_search = int(nb_search)
                if nb_search < 0:
                    # nb_search is negative, so +
                    nb_search = max(nb_tot + nb_search, 0)
                apply_for = nb_real >= nb_search
            return apply_for

        ok_apply = get_state_for(nb_sons, nb_ok, nb_search_ok)
        # WARNING counts both warning and critical sons
        warn_apply = get_state_for(nb_sons, nb_warn + nb_crit, nb_search_warn)
        crit_apply = get_state_for(nb_sons, nb_crit, nb_search_crit)

        # return the worst state that apply
        if crit_apply:
            if self.not_value:
                return self.get_reverse_state(2)
            return 2

        if warn_apply:
            if self.not_value:
                return self.get_reverse_state(1)
            return 1

        if ok_apply:
            if self.not_value:
                return self.get_reverse_state(0)
            return 0

        # Maybe even OK is not possible, if so, it depends if the admin
        # ask a simple form Xof: or a multiple one A,B,Cof:
        # the simple should give OK, the mult should give the worst state
        if self.is_of_mul:
            if self.not_value:
                return self.get_reverse_state(0)
            return 0
        if 2 in states:
            worst_state = 2
        else:
            worst_state = max(states)
        if self.not_value:
            return self.get_reverse_state(worst_state)
        return worst_state

    def list_all_elements(self):
        """Get all host/service uuid in our node and below

        :return: list of hosts/services uuids
        :rtype: list
        """
        res = []

        # We are a host/service
        if self.operand in ['host', 'service']:
            return [self.sons[0]]

        for son in self.sons:
            res.extend(son.list_all_elements())

        # and returns a list of unique uuids
        return list(set(res))

    def switch_zeros_of_values(self):
        """If we are a of: rule, we can get some 0 in of_values,
           if so, change them with NB sons instead

        :return: None
        """
        nb_sons = len(self.sons)
        # Need a list for assignment
        new_values = list(self.of_values)
        for i in [0, 1, 2]:
            if new_values[i] == '0':
                new_values[i] = str(nb_sons)
        self.of_values = tuple(new_values)

    def is_valid(self):
        """Check if all leaves are correct (no error)

        :return: True if correct, otherwise False
        :rtype: bool
        """
        valid = True
        if not self.sons:
            valid = False
        else:
            for son in self.sons:
                if isinstance(son, DependencyNode) and not son.is_valid():
                    # Bubble up the sons' configuration errors
                    self.configuration_errors.extend(son.configuration_errors)
                    valid = False
        return valid
|
class DependencyNode(object):
'''
DependencyNode is a node class for business_rule expression(s)
'''
def __init__(self, params=None, parsing=False):
pass
def __repr__(self):
pass
def serialize(self, no_json=True, printing=False):
'''This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes except the sons that are serialized
:return: json representation of a DependencyNode
:rtype: dict
'''
pass
@staticmethod
def get_reverse_state(state):
'''Do a symmetry around 1 of the state ::
* 0 -> 2
* 1 -> 1
* 2 -> 0
* else -> else
:param state: state to reverse
:type state: int
:return: Integer from 0 to 2 (usually)
:rtype: int
'''
pass
def get_state(self, hosts, services):
'''Get node state by looking recursively over sons and applying operand
:param hosts: list of available hosts to search for
:param services: list of available services to search for
:return: Node state
:rtype: int
'''
pass
def get_host_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
'''Get host node state, simplest case ::
* Handle not value (revert) for host and consider 1 as 2
:return: 0, 1 or 2
:rtype: int
'''
pass
def get_service_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
'''Get service node state, simplest case ::
* Handle not value (revert) for service
:return: 0, 1 or 2
:rtype: int
'''
pass
def get_complex_or_node_state(self, hosts, services):
'''Get state , handle OR aggregation ::
* Get the best state (min of sons)
* Revert if it's a not node
:param hosts: host objects
:param services: service objects
:return: 0, 1 or 2
:rtype: int
'''
pass
def get_complex_and_node_state(self, hosts, services):
'''Get state , handle AND aggregation ::
* Get the worst state. 2 or max of sons (3 <=> UNKNOWN < CRITICAL <=> 2)
* Revert if it's a not node
:param hosts: host objects
:param services: service objects
:return: 0, 1 or 2
:rtype: int
'''
pass
def get_complex_xof_node_state(self, hosts, services):
'''Get state , handle X of aggregation ::
* Count the number of OK, WARNING, CRITICAL
* Try too apply, in this order, Critical, Warning, OK rule
* Return the code for first match (2, 1, 0)
* If no rule apply, return OK for simple X of and worst state for multiple X of
:param hosts: host objects
:param services: service objects
:return: 0, 1 or 2
:rtype: int
TODO: Looks like the last if does the opposite of what the comment says
'''
pass
def get_state_for(nb_tot, nb_real, nb_search):
'''Check if there is enough value to apply this rule
:param nb_tot: total number of value
:type nb_tot: int
:param nb_real: number of value that apply for this rule
:type nb_real: int
:param nb_search: max value to apply this rule (can be a percent)
:type nb_search: int
:return: True if the rule is effective (roughly nb_real > nb_search), False otherwise
:rtype: bool
'''
pass
def list_all_elements(self):
'''Get all host/service uuid in our node and below
:return: list of hosts/services uuids
:rtype: list
'''
pass
def switch_zeros_of_values(self):
'''If we are a of: rule, we can get some 0 in of_values,
if so, change them with NB sons instead
:return: None
'''
pass
def is_valid(self):
'''Check if all leaves are correct (no error)
:return: True if correct, otherwise False
:rtype: bool
'''
pass
| 16 | 13 | 25 | 3 | 13 | 9 | 5 | 0.73 | 1 | 6 | 0 | 0 | 12 | 6 | 13 | 13 | 338 | 51 | 168 | 48 | 152 | 122 | 148 | 47 | 133 | 15 | 1 | 3 | 63 |
4,056 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestEscalation
|
class TestEscalation(PropertiesTester, AlignakTest):
    """Check the default property values of an Escalation item."""

    unused_props = []

    without_default = [
        'escalation_name',
        'first_notification', 'last_notification',
        'first_notification_time', 'last_notification_time',
    ]

    properties = {
        'host_name': '',
        'hostgroup_name': '',
        'service_description': '',
        'contact_groups': [],
        'contacts': [],
        'imported_from': 'alignak-self',
        'use': [],
        'register': True,
        'definition_order': 100,
        'name': '',
        'notification_interval': -1,
        'escalation_period': '',
        'escalation_options': ['d', 'x', 'r', 'w', 'c'],
    }

    def setUp(self):
        # The item under test is a default-initialized Escalation
        super(TestEscalation, self).setUp()
        from alignak.objects.escalation import Escalation
        self.item = Escalation({}, parsing=True)
|
class TestEscalation(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 27 | 4 | 23 | 7 | 20 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,057 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestContactgroup
|
class TestContactgroup(PropertiesTester, AlignakTest):
    """Check the default property values of a Contactgroup item."""

    unused_props = []

    without_default = ['contactgroup_name']

    properties = {
        'alias': '',
        'members': [],
        'imported_from': 'alignak-self',
        'use': [],
        'register': True,
        'definition_order': 100,
        'name': '',
        # 'unknown_members': [],
        'contactgroup_members': [],
    }

    def setUp(self):
        # The item under test is a default-initialized Contactgroup
        super(TestContactgroup, self).setUp()
        from alignak.objects.contactgroup import Contactgroup
        self.item = Contactgroup({}, parsing=True)
|
class TestContactgroup(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0.06 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 22 | 4 | 17 | 7 | 14 | 1 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,058 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/brokerlink.py
|
alignak.objects.brokerlink.BrokerLink
|
class BrokerLink(SatelliteLink):
    """Satellite link dedicated to a broker daemon."""
    my_type = 'broker'
    my_name_property = "%s_name" % my_type

    properties = SatelliteLink.properties.copy()
    properties.update({
        'type':
            StringProp(default=u'broker', fill_brok=[FULL_STATUS], to_send=True),
        'broker_name':
            StringProp(default='', fill_brok=[FULL_STATUS]),
        'port':
            IntegerProp(default=7772, fill_brok=[FULL_STATUS], to_send=True),
        'initialized':
            BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True),
    })

    def prepare_for_conf(self):
        """Initialize the pushed configuration dictionary
        with the inner properties that are to be propagated to the satellite link.

        :return: None
        """
        super(BrokerLink, self).prepare_for_conf()

        # A broker may be linked with the other satellite daemon types
        linked_satellites = dict((satellite_type, {})
                                 for satellite_type in ('receivers', 'pollers', 'reactionners'))
        self.cfg.update({'satellites': linked_satellites})
|
class BrokerLink(SatelliteLink):
'''
Class to manage the broker information
'''
def prepare_for_conf(self):
'''Initialize the pushed configuration dictionary
with the inner properties that are to be propagated to the satellite link.
:return: None
'''
pass
| 2 | 2 | 15 | 2 | 9 | 4 | 1 | 0.3 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 71 | 34 | 4 | 23 | 5 | 21 | 7 | 8 | 5 | 6 | 1 | 4 | 0 | 1 |
4,059 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/brokerlink.py
|
alignak.objects.brokerlink.BrokerLinks
|
class BrokerLinks(SatelliteLinks):
    """
    Class to manage list of BrokerLink.
    BrokerLinks is used to regroup all brokers
    """
    # Each item of the list is a BrokerLink
    inner_class = BrokerLink
|
class BrokerLinks(SatelliteLinks):
'''
Class to manage list of BrokerLink.
BrokerLinks is used to regroup all brokers
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 6 | 0 | 2 | 2 | 1 | 4 | 2 | 2 | 1 | 0 | 3 | 0 | 0 |
4,060 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/businessimpactmodulation.py
|
alignak.objects.businessimpactmodulation.Businessimpactmodulation
|
class Businessimpactmodulation(Item):
    """A Businessimpactmodulation modulates the business impact value
    of a Host/Service while its modulation period is active.
    """
    my_type = 'businessimpactmodulation'
    my_name_property = "business_impact_modulation_name"

    properties = Item.properties.copy()
    properties.update({
        'business_impact_modulation_name': StringProp(),
        'business_impact': IntegerProp(),
        'modulation_period': StringProp(default=''),
    })

    def __init__(self, params, parsing=True):
        super(Businessimpactmodulation, self).__init__(params, parsing=parsing)
        self.fill_default()

        # An unset modulation period means "always" (24x7)
        self.modulation_period = self.modulation_period or '24x7'
|
class Businessimpactmodulation(Item):
'''Businessimpactmodulation class is simply a modulation of the business impact value
(of a Host/Service) during a modulation period.
'''
def __init__(self, params, parsing=True):
pass
| 2 | 1 | 8 | 2 | 5 | 1 | 2 | 0.24 | 1 | 1 | 0 | 0 | 1 | 1 | 1 | 35 | 25 | 4 | 17 | 6 | 15 | 4 | 10 | 6 | 8 | 2 | 3 | 1 | 2 |
4,061 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/businessimpactmodulation.py
|
alignak.objects.businessimpactmodulation.Businessimpactmodulations
|
class Businessimpactmodulations(Items):
    """Businessimpactmodulations class allowed to handle easily
    several Businessimpactmodulation objects
    """
    inner_class = Businessimpactmodulation

    def linkify(self, timeperiods):
        """Replace each item's modulation_period name with the matching
        timeperiod object (wrapper for Items.linkify_with_timeperiods)

        :param timeperiods: timeperiods to link to
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        """
        self.linkify_with_timeperiods(timeperiods, 'modulation_period')
|
class Businessimpactmodulations(Items):
'''Businessimpactmodulations class allowed to handle easily
several Businessimpactmodulation objects
'''
def linkify(self, timeperiods):
'''Wrapper for Businessimpactmodulations.linkify_cm_by_tp(timeperiods)
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
'''
pass
| 2 | 2 | 8 | 1 | 2 | 5 | 1 | 2 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 46 | 15 | 3 | 4 | 3 | 2 | 8 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
4,062 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestHost
|
class TestHost(PropertiesTester, AlignakTest):
    """Check the default property values of a Host item."""

    unused_props = []

    without_default = [
        'host_name',
        'address',
        'check_period',
        'notification_period',
        # 'state_id_before_impact'
    ]

    properties = {
        'alias': '',
        'imported_from': 'alignak-self',
        'use': [],
        'register': True,
        'definition_order': 100,
        'name': '',
        'display_name': '',
        'address6': '',
        'parents': [],
        'hostgroups': [],
        'check_command': '',
        'initial_state': 'o',
        'freshness_state': 'x',
        'check_interval': 0,
        'max_check_attempts': 1,
        'retry_interval': 0,
        'active_checks_enabled': True,
        'passive_checks_enabled': True,
        'check_freshness': False,
        'freshness_threshold': 0,
        'event_handler': '',
        'event_handler_enabled': False,
        'low_flap_threshold': 25,
        'high_flap_threshold': 50,
        'flap_detection_enabled': True,
        'flap_detection_options': ['o', 'd', 'x'],
        'process_perf_data': True,
        'retain_status_information': True,
        'retain_nonstatus_information': True,
        'contacts': [],
        'contact_groups': [],
        'notification_interval': 60,
        'first_notification_delay': 0,
        'notification_options': ['d', 'x', 'r', 'f'],
        'notifications_enabled': True,
        'stalking_options': [],
        'notes': '',
        'notes_url': '',
        'action_url': '',
        'icon_image': '',
        'icon_image_alt': '',
        'icon_set': '',
        'vrml_image': '',
        'statusmap_image': '',
        '2d_coords': '',
        '3d_coords': '',
        'realm': '',
        'poller_tag': 'None',
        'reactionner_tag': 'None',
        'resultmodulations': [],
        'business_impact_modulations': [],
        'escalations': [],
        'maintenance_period': '',
        'business_impact': 2,
        'time_to_orphanage': 300,
        'trending_policies': [],
        'checkmodulations': [],
        'macromodulations': [],
        'custom_views': [],
        'service_overrides': [],
        'service_excludes': [],
        'service_includes': [],
        'business_rule_output_template': '',
        'business_rule_smart_notifications': False,
        'business_rule_downtime_as_ack': False,
        'labels': [],
        'snapshot_interval': 5,
        'snapshot_command': '',
        'snapshot_enabled': False,
        'snapshot_period': '',
        'snapshot_criteria': ['d', 'x'],
        'business_rule_host_notification_options': [],
        'business_rule_service_notification_options': [],
        # 'state_id_before_impact': 0
    }

    def setUp(self):
        # The item under test is a default-initialized Host
        super(TestHost, self).setUp()
        from alignak.objects.host import Host
        self.item = Host({}, parsing=True)
|
class TestHost(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0.02 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 93 | 4 | 87 | 7 | 84 | 2 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,063 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/checkmodulation.py
|
alignak.objects.checkmodulation.CheckModulation
|
class CheckModulation(Item):
    """CheckModulation class is simply a modulation of the check command (of a Host/Service)
    during a check_period.
    """
    my_type = 'checkmodulation'
    my_name_property = "%s_name" % my_type

    properties = Item.properties.copy()
    properties.update({
        'checkmodulation_name':
            StringProp(fill_brok=[FULL_STATUS]),
        'check_command':
            StringProp(fill_brok=[FULL_STATUS]),
        'check_period':
            StringProp(brok_transformation=to_name_if_possible, fill_brok=[FULL_STATUS]),
    })

    running_properties = Item.running_properties.copy()

    # check_period gets linkified to a timeperiod afterwards, hence "special"
    special_properties = ('check_period',)

    macros = {}

    def __init__(self, params, parsing=True):
        """Initialize a CheckModulation.

        When restoring a serialized object (parsing=False), the check_command
        arrives as a dict and the inner CommandCall object is rebuilt from it.

        :param params: configuration or serialized parameters
        :type params: dict
        :param parsing: True when parsing a configuration
        :type parsing: bool
        """
        # When deserialized, those are dict
        if not parsing and 'check_command' in params and isinstance(params['check_command'], dict):
            # We recreate the object
            self.check_command = CommandCall(params['check_command'])
            # And remove prop, to prevent from being overridden
            del params['check_command']

        super(CheckModulation, self).__init__(params, parsing=parsing)
        self.fill_default()

    def serialize(self, no_json=True, printing=False):
        """Serialize to a simple dict; the inner check_command CommandCall
        is serialized on its own.

        :return: dict representation of this CheckModulation
        :rtype: dict
        """
        res = super(CheckModulation, self).serialize()

        res['check_command'] = None
        if getattr(self, 'check_command', None):
            res['check_command'] = self.check_command.serialize(no_json=no_json,
                                                                printing=printing)
        return res

    def get_check_command(self, timeperiods, t_to_go):
        """Get the check_command if we are in the check period modulation

        :param timeperiods: known timeperiods, indexed by uuid
        :param t_to_go: time to check if we are in the timeperiod
        :type t_to_go:
        :return: A check command if we are in the check period, None otherwise
        :rtype: alignak.objects.command.Command
        """
        # No check_period means 24x7: the modulation always applies
        if not self.check_period or timeperiods[self.check_period].is_time_valid(t_to_go):
            return self.check_command
        return None

    def is_correct(self):
        """Check if this object configuration is correct ::

        * Check our own specific properties
        * Call our parent class is_correct checker

        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        """
        state = True

        # Internal checks before executing inherited function...
        if not hasattr(self, 'check_command'):
            self.add_error("[checkmodulation::%s] do not have any check_command defined"
                           % self.get_name())
            state = False
        else:
            if self.check_command is None:
                self.add_error("[checkmodulation::%s] a check_command is missing"
                               % self.get_name())
                state = False
            if self.check_command and not self.check_command.is_valid():
                self.add_error("[checkmodulation::%s] a check_command is invalid"
                               % self.get_name())
                state = False

        # Ok just put None as check_period, means 24x7
        if not hasattr(self, 'check_period'):
            self.check_period = None

        return super(CheckModulation, self).is_correct() and state
|
class CheckModulation(Item):
'''CheckModulation class is simply a modulation of the check command (of a Host/Service)
during a check_period.
'''
def __init__(self, params, parsing=True):
pass
def serialize(self, no_json=True, printing=False):
pass
def get_check_command(self, timeperiods, t_to_go):
'''Get the check_command if we are in the check period modulation
:param t_to_go: time to check if we are in the timeperiod
:type t_to_go:
:return: A check command if we are in the check period, None otherwise
:rtype: alignak.objects.command.Command
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
| 5 | 3 | 15 | 2 | 9 | 4 | 3 | 0.4 | 1 | 3 | 1 | 0 | 4 | 2 | 4 | 38 | 86 | 16 | 50 | 15 | 45 | 20 | 38 | 15 | 33 | 5 | 3 | 2 | 11 |
4,064 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/dispatcher.py
|
alignak.dispatcher.DispatcherError
|
class DispatcherError(Exception):
    """Raised when the configuration dispatching fails.

    Attributes:
        msg -- explanation of the error
    """

    def __init__(self, msg):
        super(DispatcherError, self).__init__()
        # Keep the explanation for later formatting
        self.message = msg

    def __str__(self):  # pragma: no cover
        """Exception to String"""
        return "Dispatcher error: %s" % self.message
|
class DispatcherError(Exception):
'''Exception raised for errors in the configuration dispatching.
Attributes:
msg -- explanation of the error
'''
def __init__(self, msg):
pass
def __str__(self):
'''Exception to String'''
pass
| 3 | 2 | 3 | 0 | 3 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 12 | 14 | 3 | 6 | 4 | 3 | 6 | 6 | 4 | 3 | 1 | 3 | 0 | 2 |
4,065 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daterange.py
|
alignak.daterange.StandardDaterange
|
class StandardDaterange(AbstractDaterange):
    """StandardDaterange is for standard entry (weekday - weekday)
    """
    def __init__(self, params, parsing=True):
        """
        Init of StandardDaterange

        :param params: dict with 'day' (one of Daterange.weekdays), 'other'
           (comma-separated timerange string), and optionally already-built
           'timeranges' (when unserializing)
        :type params: dict
        :param parsing: True when parsing a configuration
        :type parsing: bool
        :return: None
        """
        super(StandardDaterange, self).__init__(params, parsing=parsing)

        self.other = params['other']

        if 'timeranges' in params:
            self.timeranges = [Timerange(params=t) for t in params['timeranges']]
        else:
            self.timeranges = []
            for timeinterval in params['other'].split(','):
                self.timeranges.append(Timerange(timeinterval.strip()))

        self.day = params['day']

    def serialize(self, no_json=True, printing=False):
        """This function serialize into a simple dict object.
        It is used when transferring data to other daemons over the network (http)

        Here we directly return all attributes

        :return: json representation of a Daterange
        :rtype: dict
        """
        return {
            'day': self.day, 'other': self.other,
            'timeranges': [t.serialize() for t in self.timeranges]
        }

    def is_correct(self):
        """Check if the Daterange is correct : weekdays are valid

        :return: True if weekdays are valid, False otherwise
        :rtype: bool
        """
        valid = self.day in Daterange.weekdays
        if not valid:
            logger.error("Error: %s is not a valid day", self.day)
        # Check also if Daterange is correct.
        valid &= super(StandardDaterange, self).is_correct()
        return valid

    def get_start_and_end_time(self, ref=None):
        """Specific function to get start time and end time for StandardDaterange

        :param ref: time in seconds
        :type ref: int
        :return: tuple with start and end time
        :rtype: tuple (int, int)
        """
        now = time.localtime(ref)
        self.syear = now.tm_year
        self.month = now.tm_mon
        self.wday = now.tm_wday
        day_id = Daterange.get_weekday_id(self.day)
        today_morning = get_start_of_day(now.tm_year, now.tm_mon, now.tm_mday)
        tonight = get_end_of_day(now.tm_year, now.tm_mon, now.tm_mday)
        day_diff = (day_id - now.tm_wday) % 7
        morning = datetime.fromtimestamp(today_morning) + timedelta(days=day_diff)
        night = datetime.fromtimestamp(tonight) + timedelta(days=day_diff)
        # Use datetime.timestamp() instead of strftime("%s"): "%s" is a
        # glibc-only extension (undefined on Windows and other libcs), whereas
        # timestamp() on a naive datetime yields the same local-time POSIX
        # timestamp portably.
        return (int(morning.timestamp()), int(night.timestamp()))
|
class StandardDaterange(AbstractDaterange):
'''StandardDaterange is for standard entry (weekday - weekday)
'''
def __init__(self, params, parsing=True):
'''
Init of StandardDaterange
:param day: one of Daterange.weekdays
:type day: str
:param other:
:type other: str
:return: None
'''
pass
def serialize(self, no_json=True, printing=False):
'''This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Daterange
:rtype: dict
'''
pass
def is_correct(self):
'''Check if the Daterange is correct : weekdays are valid
:return: True if weekdays are valid, False otherwise
:rtype: bool
'''
pass
def get_start_and_end_time(self, ref=None):
'''Specific function to get start time and end time for StandardDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
'''
pass
| 5 | 5 | 16 | 2 | 8 | 6 | 2 | 0.79 | 1 | 6 | 2 | 0 | 4 | 6 | 4 | 26 | 72 | 11 | 34 | 20 | 29 | 27 | 30 | 20 | 25 | 3 | 3 | 2 | 7 |
4,066 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_daemon_start.py
|
tests.test_daemon_start.Test_Arbiter_Start
|
class Test_Arbiter_Start(TemplateDaemonStart, AlignakTest):
    # Concrete daemon start-up test case: runs the shared scenarios defined
    # in TemplateDaemonStart against the Arbiter daemon class.
    def setUp(self):
        super(Test_Arbiter_Start, self).setUp()

    # Daemon class and name presumably consumed by the TemplateDaemonStart
    # scenarios - TODO confirm against TemplateDaemonStart.
    daemon_cls = Arbiter
    daemon_name = 'my_arbiter'
|
class Test_Arbiter_Start(TemplateDaemonStart, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 6 | 1 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
4,067 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_customs_on_service_hosgroups.py
|
tests.test_customs_on_service_hosgroups.TestCustomsonservicehosgroups
|
class TestCustomsonservicehosgroups(AlignakTest):
    """
    Class for testing custom macros on service hostgroups
    """
    def setUp(self):
        super(TestCustomsonservicehosgroups, self).setUp()
        self.setup_with_file('cfg/cfg_customs_on_service_hosgroups.cfg')

    # We look for 3 services: one defined directly on 1 host, one on
    # 2 hosts, and a last one on a hostgroup
    def test_check_for_custom_copy_on_serice_hostgroups(self):
        """
        Test custom macros on service hostgroups
        """
        services = self._arbiter.conf.services

        # The service defined directly on a single host
        one_host_svc = services.find_srv_by_name_and_hostname(
            "test_host_0", "test_on_1_host")
        assert one_host_svc is not None

        # The service duplicated on two hosts
        two_hosts_svc_a = services.find_srv_by_name_and_hostname(
            "test_host_0", "test_on_2_hosts")
        assert two_hosts_svc_a is not None
        two_hosts_svc_b = services.find_srv_by_name_and_hostname(
            "test_router_0", "test_on_2_hosts")
        assert two_hosts_svc_b is not None

        # The service defined on a hostgroup
        group_svc = services.find_srv_by_name_and_hostname(
            "test_router_0", "test_on_group")
        assert group_svc is not None

        # Every one of them must have inherited the custom macro
        for svc in (one_host_svc, two_hosts_svc_a, two_hosts_svc_b, group_svc):
            assert svc.customs['_CUSTNAME'] == 'custvalue'
|
class TestCustomsonservicehosgroups(AlignakTest):
'''
Class for testing custom macros on service hostgroups
'''
def setUp(self):
pass
def test_check_for_custom_copy_on_serice_hostgroups(self):
'''
Test custom macros on service hostgroups
'''
pass
| 3 | 2 | 14 | 1 | 10 | 4 | 1 | 0.57 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 57 | 37 | 4 | 21 | 7 | 18 | 12 | 17 | 7 | 14 | 1 | 2 | 0 | 2 |
4,068 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_contactgroup.py
|
tests.test_contactgroup.TestContactGroup
|
class TestContactGroup(AlignakTest):
    """
    This class tests the contactgroups
    """
    def setUp(self):
        super(TestContactGroup, self).setUp()

    def test_contactgroup(self):
        """ Default configuration has no loading problems ... as of it contactgroups are parsed
        correctly

        :return: None
        """
        self.setup_with_file('cfg/cfg_default.cfg')
        assert self.conf_is_correct

    def test_look_for_alias(self):
        """ Default configuration has no loading problems ... as of it contactgroups are parsed
        correctly

        :return: None
        """
        self.setup_with_file('cfg/contactgroup/alignak_groups_with_no_alias.cfg')
        assert self.conf_is_correct

        # Find a contactgroup named NOALIAS
        cg = self._arbiter.conf.contactgroups.find_by_name("NOALIAS")
        assert isinstance(cg, Contactgroup)
        assert cg.get_name() == "NOALIAS"
        # A group declared without an alias gets an empty alias string
        assert cg.alias == ""

    def test_contactgroup_members(self):
        """ Test if members are linked from group

        :return: None
        """
        self.setup_with_file('cfg/contactgroup/alignak_contactgroup_members.cfg')
        assert self.conf_is_correct

        # Found a contactgroup named allhosts_and_groups
        cg = self._arbiter.conf.contactgroups.find_by_name(
            "allcontacts_and_groups"
        )
        assert isinstance(cg, Contactgroup)
        assert cg.get_name() == "allcontacts_and_groups"

        assert len(self._arbiter.conf.contactgroups.get_members_of_group(
            "allcontacts_and_groups")) == 2

        assert len(cg.get_contacts()) == 2
        for cid in cg.get_contacts():
            contact = self._arbiter.conf.contacts[cid]
            print(contact)
            if contact.get_name() == "test_contact":
                assert contact.get_groupname() == "another_contact_test"
                assert contact.get_groupnames() == "another_contact_test"
            # This should match but there is a problem currently
            # Todo: fix this cross reference between contacts and contactgroups
            # Ongoing PR ...
            # if contact.get_name() == "test_contact_2":
            #     self.assertEqual(contact.get_groupname(), "allcontacts_and_groups")
            #     self.assertEqual(contact.get_groupnames(), "allcontacts_and_groups")

        assert len(cg.get_contactgroup_members()) == 1

    def test_members_contactgroup(self):
        """ Test if group is linked from the member

        :return: None
        """
        self.setup_with_file('cfg/contactgroup/alignak_contactgroup_members.cfg')
        assert self.conf_is_correct

        # Found a contactgroup named allhosts_and_groups
        cg = self._arbiter.conf.contactgroups.find_by_name("allcontacts_and_groups")
        assert isinstance(cg, Contactgroup)
        assert cg.get_name() == "allcontacts_and_groups"

        assert len(self._arbiter.conf.contactgroups.get_members_of_group(
            "allcontacts_and_groups")) == 2

        assert len(cg.get_contacts()) == 2
        print("List contactgroup contacts:")
        for contact_id in cg.members:
            contact = self._arbiter.conf.contacts[contact_id]
            print(("Contact: %s" % contact))
            assert isinstance(contact, Contact)
            if contact.get_name() == 'test_ok_0':
                # This contact belongs to 4 groups in the test configuration
                assert len(contact.get_contactgroups()) == 4
                for group_id in contact.contactgroups:
                    group = self._arbiter.conf.contactgroups[group_id]
                    print(("Group: %s" % group))
                    assert group.get_name() in [
                        'ok', 'contactgroup_01', 'contactgroup_02', 'allcontacts_and_groups'
                    ]

        assert len(cg.get_contactgroup_members()) == 1
        print("List contactgroup groups:")
        for group in cg.get_contactgroup_members():
            print(("Group: %s" % group))
            assert group in [
                'test_contact'
            ]

    def test_contactgroup_with_no_contact(self):
        """ Allow contactgroups with no hosts

        :return: None
        """
        self.setup_with_file('cfg/contactgroup/alignak_contactgroup_no_contact.cfg')
        assert self.conf_is_correct

        assert len(self._arbiter.conf.contactgroups) == 3

        for group in self._arbiter.conf.contactgroups:
            # contactgroups property returns an object list ... unlike the hostgroups property
            # of an host group ...
            # group = self._arbiter.conf.contactgroups[group_id]
            print(("Group: %s" % group))

        # Found a contactgroup named void
        cg = self._arbiter.conf.contactgroups.find_by_name("void")
        print(("cg: %s" % cg))
        assert isinstance(cg, Contactgroup)
        assert cg.get_name() == "void"

        # The empty group has no members nor sub-groups
        assert len(self._arbiter.conf.contactgroups.get_members_of_group("void")) == 0

        print(("Contacts: %s" % cg.get_contactgroup_members()))
        assert len(cg.get_contactgroup_members()) == 0

        print(("Contacts: %s" % cg.get_contacts()))
        assert len(cg.get_contacts()) == 0

    def test_contactgroup_with_space(self):
        """ Test that contactgroups can have a name with spaces

        :return: None
        """
        self.setup_with_file('cfg/cfg_default.cfg')
        assert self.conf_is_correct
        # Remember the group count of the default configuration for comparison
        self.nb_contactgroups = len(self._arbiter.conf.contactgroups)

        self.setup_with_file('cfg/contactgroup/alignak_contactgroup_with_space.cfg')
        assert self.conf_is_correct

        # Two more groups than the default configuration
        assert len(self._arbiter.conf.contactgroups) == self.nb_contactgroups + 1

        assert self._arbiter.conf.contactgroups.find_by_name("test_With Spaces").get_name() == \
            "test_With Spaces"
        assert self._arbiter.conf.contactgroups.get_members_of_group(
            "test_With Spaces") is not []

    def _dump_host(self, h):
        """Debug helper: print a host's contact groups and contact names."""
        print("Dumping host", h.get_name())
        print(h.contact_groups)
        for c in h.contacts:
            print("->", self._arbiter.conf.contacts[c].get_name())

    def _dump_svc(self, s):
        """Debug helper: print a service's contact groups and contact names."""
        print("Dumping Service", s.get_name())
        print("  contact_groups : %s " % s.contact_groups)
        for c in s.contacts:
            print("->", self._arbiter.conf.contacts[c].get_name())

    def test_contactgroups_plus_inheritance(self):
        """ Test that contacts groups correctly manage inheritance

        :return: None
        """
        self.setup_with_file('cfg/contactgroup/alignak_contactgroups_plus_inheritance.cfg')
        assert self.conf_is_correct

        host0 = self._arbiter.conf.hosts.find_by_name("test_host_0")
        # HOST 1 should have 2 group of contacts
        # WARNING, it's a string, not the real objects!
        self._dump_host(host0)

        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host0.contacts]
        assert "test_contact_2" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host0.contacts]

        host2 = self._arbiter.conf.hosts.find_by_name("test_host_2")
        self._dump_host(host2)
        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host2.contacts]

        host3 = self._arbiter.conf.hosts.find_by_name("test_host_3")
        self._dump_host(host3)
        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host3.contacts]
        assert "test_contact_2" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host3.contacts]

        host4 = self._arbiter.conf.hosts.find_by_name("test_host_4")
        self._dump_host(host4)
        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host4.contacts]

        host5 = self._arbiter.conf.hosts.find_by_name("test_host_5")
        self._dump_host(host5)
        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host5.contacts]
        assert "test_contact_2" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host5.contacts]

        host6 = self._arbiter.conf.hosts.find_by_name("test_host_6")
        self._dump_host(host6)
        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host6.contacts]
        assert "test_contact_2" in \
            [self._arbiter.conf.contacts[c].get_name() for c in host6.contacts]

        # Now Let's check service inheritance
        svc1 = self._arbiter.conf.services.find_srv_by_name_and_hostname(
            "test_host_0", "svc_tmplA")
        self._dump_svc(svc1)
        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in svc1.contacts]

        svc2 = self._arbiter.conf.services.find_srv_by_name_and_hostname(
            "test_host_0", "svc_tmplB"
        )
        self._dump_svc(svc2)
        assert "test_contact_2" in \
            [self._arbiter.conf.contacts[c].get_name() for c in svc2.contacts]

        svc3 = self._arbiter.conf.services.find_srv_by_name_and_hostname(
            "test_host_0", "svc_tmplA_tmplB"
        )
        # Service inheriting from both templates gets both contacts
        assert "test_contact_1" in \
            [self._arbiter.conf.contacts[c].get_name() for c in svc3.contacts]
        assert "test_contact_2" in \
            [self._arbiter.conf.contacts[c].get_name() for c in svc3.contacts]
        self._dump_svc(svc3)
|
class TestContactGroup(AlignakTest):
'''
This class tests the contactgroups
'''
def setUp(self):
pass
def test_contactgroup(self):
''' Default configuration has no loading problems ... as of it contactgroups are parsed
correctly
:return: None
'''
pass
def test_look_for_alias(self):
''' Default configuration has no loading problems ... as of it contactgroups are parsed
correctly
:return: None
'''
pass
def test_contactgroup_members(self):
''' Test if members are linked from group
:return: None
'''
pass
def test_members_contactgroup(self):
''' Test if group is linked from the member
:return: None
'''
pass
def test_contactgroup_with_no_contact(self):
''' Allow contactgroups with no hosts
:return: None
'''
pass
def test_contactgroup_with_space(self):
''' Test that contactgroups can have a name with spaces
:return: None
'''
pass
def _dump_host(self, h):
pass
def _dump_svc(self, s):
pass
def test_contactgroups_plus_inheritance(self):
''' Test that contacts groups correctly manage inheritance
:return: None
'''
pass
| 11 | 8 | 22 | 3 | 15 | 4 | 2 | 0.29 | 1 | 1 | 0 | 0 | 10 | 1 | 10 | 65 | 234 | 41 | 150 | 34 | 139 | 43 | 121 | 34 | 110 | 5 | 2 | 3 | 19 |
4,069 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_contactdowntimes.py
|
tests.test_contactdowntimes.TestContactDowntime
|
class TestContactDowntime(AlignakTest):
    """
    This class test downtime for contacts
    """
    def setUp(self):
        super(TestContactDowntime, self).setUp()
        self.setup_with_file("cfg/cfg_default.cfg", dispatching=True)
        self._sched = self._scheduler

    def test_contact_downtime(self):
        """
        Test contact downtime and brok creation associated
        """
        # schedule a 2-minute downtime
        # downtime must be active
        # consume a good result, sleep for a minute
        # downtime must be active
        # consume a bad result
        # downtime must be active
        # no notification must be found in broks
        duration = 600
        now = time.time()
        # downtime valid for the next 2 minutes
        test_contact = self._sched.contacts.find_by_name('test_contact')
        cmd = "[%lu] SCHEDULE_CONTACT_DOWNTIME;test_contact;%d;%d;lausser;blablub" % (now, now, now + duration)
        self._sched.run_external_commands([cmd])
        self.external_command_loop()

        svc = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        # Change the notif interval, so we can notify as soon as we want
        svc.notification_interval = 0.001

        host = self._sched.hosts.find_by_name("test_host_0")
        host.act_depend_of = []  # ignore the router

        # We loop, the downtime will be checked and activated
        self.scheduler_loop(1, [[svc, 0, 'OK'], [host, 0, 'UP']])

        self.assert_any_event_match('CONTACT DOWNTIME ALERT.*;STARTED')

        print("downtime was scheduled. check its activity and the comment\n"*5)
        self.assertEqual(1, len(test_contact.downtimes))

        downtime = list(test_contact.downtimes.values())[0]
        assert downtime.is_in_effect
        assert not downtime.can_be_deleted

        # Ok, we define the downtime like we should, now look at if it does the job: do not
        # raise notif during a downtime for this contact
        self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
        time.sleep(1.0)
        assert downtime.is_in_effect
        assert not downtime.can_be_deleted
        assert svc.state == 'CRITICAL'
        assert svc.state_type == 'HARD'

        # We should NOT see any service notification
        self.assert_no_event_match('SERVICE NOTIFICATION.*;CRITICAL')

        # Now we short the downtime a lot so it will be stop at now + 1 sec.
        downtime.end_time = time.time() + 1

        time.sleep(2)

        # We invalidate it with a scheduler loop
        self.scheduler_loop(1, [])

        # So we should be out now, with a log
        self.assert_any_event_match('CONTACT DOWNTIME ALERT.*;STOPPED')

        print("\n\nDowntime was ended. Check it is really stopped")
        self.assertEqual(0, len(test_contact.downtimes))

        for n in list(svc.notifications_in_progress.values()):
            print("NOTIF", n, n.t_to_go, time.time())

        # Now we want this contact to be really notify!
        # Ok, we define the downtime like we should, now look at if it does the job: do not
        # raise notif during a downtime for this contact
        time.sleep(1)
        self.scheduler_loop(3, [[svc, 2, 'CRITICAL']])
        self.assert_any_event_match('SERVICE NOTIFICATION.*;CRITICAL')

        for n in list(svc.notifications_in_progress.values()):
            print("NOTIF", n, n.t_to_go, time.time(), time.time() - n.t_to_go)

    def test_contact_downtime_and_cancel(self):
        """
        Schedule a contact downtime, cancel it, then check that
        notifications are raised again once the downtime is removed.
        """
        # schedule a 2-minute downtime
        # downtime must be active
        # consume a good result, sleep for a minute
        # downtime must be active
        # consume a bad result
        # downtime must be active
        # no notification must be found in broks
        host = self._sched.hosts.find_by_name("test_host_0")
        host.act_depend_of = []  # ignore the router

        svc = self._sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        # Change the notif interval, so we can notify as soon as we want
        # Not 0 because it will disable the notifications
        svc.notification_interval = 0.001

        # Freeze the time !
        initial_datetime = datetime.datetime(year=2018, month=6, day=1,
                                             hour=18, minute=30, second=0)
        with freeze_time(initial_datetime) as frozen_datetime:
            assert frozen_datetime() == initial_datetime

            now = time.time()
            duration = 600
            # downtime valid for the next 2 minutes
            test_contact = self._sched.contacts.find_by_name('test_contact')
            cmd = "[%lu] SCHEDULE_CONTACT_DOWNTIME;test_contact;%d;%d;me;blablabla" \
                  % (now, now, now + duration)
            self._sched.run_external_commands([cmd])

            # We loop, the downtime wil be check and activate
            self.scheduler_loop(1, [[svc, 0, 'OK'], [host, 0, 'UP']])

            self.assert_any_event_match('CONTACT DOWNTIME ALERT.*;STARTED')

            print("downtime was scheduled. check its activity and the comment")
            assert len(test_contact.downtimes) == 1

            downtime = list(test_contact.downtimes.values())[0]
            assert downtime.is_in_effect
            assert not downtime.can_be_deleted

            # Time warp
            frozen_datetime.tick(delta=datetime.timedelta(minutes=1))

            # Ok, we define the downtime like we should, now look at if it does the job: do not
            # raise notifications during a downtime for this contact
            self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
            frozen_datetime.tick(delta=datetime.timedelta(seconds=5))
            self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
            frozen_datetime.tick(delta=datetime.timedelta(seconds=5))
            self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
            frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

            # We should NOT see any service notification
            self.assert_no_brok_match('SERVICE NOTIFICATION.*;CRITICAL')

            # Time warp
            frozen_datetime.tick(delta=datetime.timedelta(minutes=1))

            downtime_id = list(test_contact.downtimes)[0]
            # OK, Now we cancel this downtime, we do not need it anymore
            cmd = "[%lu] DEL_CONTACT_DOWNTIME;%s" % (now, downtime_id)
            self._sched.run_external_commands([cmd])

            # We check if the downtime is tag as to remove
            assert downtime.can_be_deleted

            # We really delete it
            self.scheduler_loop(1, [])

            # So we should be out now, with a log
            self.assert_any_event_match('CONTACT DOWNTIME ALERT.*;CANCELLED')

            print("Downtime was cancelled")
            assert len(test_contact.downtimes) == 0

            time.sleep(1)
            # Now we want this contact to be really notified
            self.scheduler_loop(1, [[svc, 2, 'CRITICAL']])
            # The notifications are created to be launched in the next second when they happen !
            # Time warp 1 second
            frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
            self.scheduler_loop(1)
            self.show_actions()
            # 2 because it is the second notification, the 1st one was hidden by the downtime !
            assert 2 == svc.current_notification_number, 'CRITICAL HARD, but no notifications !'

            # Time warp 5 minutes
            frozen_datetime.tick(delta=datetime.timedelta(minutes=5))

            # The service recovers
            self.scheduler_loop(1, [[svc, 0, 'OK']])
            # The notifications are created to be launched in the next second when they happen !
            # Time warp 1 second
            frozen_datetime.tick(delta=datetime.timedelta(seconds=1))
            self.scheduler_loop(1)
            assert 0 == svc.current_notification_number, 'Ok HARD, no notifications'

            self.assert_any_event_match('SERVICE NOTIFICATION.*;OK')
            self.assert_any_event_match('SERVICE NOTIFICATION.*;CRITICAL')
|
class TestContactDowntime(AlignakTest):
'''
This class test downtime for contacts
'''
def setUp(self):
pass
def test_contact_downtime(self):
'''
Test contact downtime and brok creation associated
'''
pass
def test_contact_downtime_and_cancel(self):
pass
| 4 | 2 | 63 | 14 | 32 | 18 | 2 | 0.6 | 1 | 4 | 0 | 0 | 3 | 1 | 3 | 58 | 197 | 46 | 97 | 23 | 93 | 58 | 95 | 22 | 91 | 3 | 2 | 1 | 5 |
4,070 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_config_shinken.py
|
tests.test_config_shinken.TestConfigShinken
|
class TestConfigShinken(AlignakTest):
    """
    This class tests the configuration
    """
    def setUp(self):
        super(TestConfigShinken, self).setUp()

        # self.set_unit_tests_logger_level()

    def test_config_ok(self):
        """ Default configuration has no loading problems ...

        :return: None
        """
        self.setup_with_file('cfg/_shinken/_main.cfg')
        self.show_logs()
        assert self.conf_is_correct

        # No error messages
        print(self.configuration_errors)
        assert len(self.configuration_errors) == 0
        # No warning messages
        print(self.configuration_warnings)
        assert len(self.configuration_warnings) == 2
        # l = [
        #     u"Some hosts exist in the realm 'France' but no broker is defined for this realm",
        #     u"Added a broker (broker-France, http://127.0.0.1:7772/) for the realm 'France'",
        #     u'Host graphite use/inherit from an unknown template: graphite ! from:
        # /home/alignak/alignak/test/cfg/_shinken/hosts/graphite.cfg:1'
        # ]
        self.assert_any_cfg_log_match(re.escape(
            "[host::graphite] use/inherit from an unknown template: graphite;"
        ))
        # self.assert_any_cfg_log_match(re.escape(
        #     "Some hosts exist in the realm 'France' but no broker is defined for this realm"
        # ))
        # self.assert_any_cfg_log_match(re.escape(
        #     "Added a broker (broker-France, http://127.0.0.1:7772/) for the realm 'France'"
        # ))

        # Arbiter named as in the configuration
        assert self._arbiter.conf.conf_is_correct
        arbiter_link = self._arbiter.conf.arbiters.find_by_name('arbiter-master')
        assert arbiter_link is not None

        # Scheduler named as in the configuration
        assert self._arbiter.conf.conf_is_correct
        scheduler_link = self._arbiter.conf.schedulers.find_by_name('scheduler-master')
        assert scheduler_link is not None
        # Scheduler configuration is ok
        # Note tht it may happen that the configuration is not sent to the scheduler-master
        # assert self._scheduler.pushed_conf.conf_is_correct

        # Broker, Poller, Reactionner named as in the configuration
        link = self._arbiter.conf.brokers.find_by_name('broker-master')
        assert link is not None
        link = self._arbiter.conf.pollers.find_by_name('poller-master')
        assert link is not None
        link = self._arbiter.conf.reactionners.find_by_name('reactionner-master')
        assert link is not None

        # Receiver - no default receiver created
        link = self._arbiter.conf.receivers.find_by_name('receiver-master')
        assert link is not None

        # Check the expected object counts for this Shinken-style configuration
        for item in self._arbiter.conf.commands:
            print("Command: %s" % item)
        assert len(self._arbiter.conf.commands) == 108

        for item in self._arbiter.conf.timeperiods:
            print("Timeperiod: %s" % item)
        assert len(self._arbiter.conf.timeperiods) == 4

        for item in self._arbiter.conf.contacts:
            print("Contact: %s" % item)
        assert len(self._arbiter.conf.contacts) == 7

        for item in self._arbiter.conf.contactgroups:
            print("Contacts group: %s" % item)
        assert len(self._arbiter.conf.contactgroups) == 3

        for item in self._arbiter.conf.hosts:
            print("Host: %s" % item)
        assert len(self._arbiter.conf.hosts) == 13

        for item in self._arbiter.conf.hostgroups:
            print("Hosts group: %s" % item)
        assert len(self._arbiter.conf.hostgroups) == 8

        for item in self._arbiter.conf.services:
            print("Service: %s" % item)
        assert len(self._arbiter.conf.services) == 94

        for item in self._arbiter.conf.servicegroups:
            print("Services group: %s" % item)
        assert len(self._arbiter.conf.servicegroups) == 5
|
class TestConfigShinken(AlignakTest):
'''
This class tests the configuration
'''
def setUp(self):
pass
def test_config_ok(self):
''' Default configuration has no loading problems ...
:return: None
'''
pass
| 3 | 2 | 45 | 7 | 26 | 12 | 5 | 0.54 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 57 | 96 | 16 | 52 | 7 | 49 | 28 | 50 | 7 | 47 | 9 | 2 | 1 | 10 |
4,071 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/property.py
|
alignak.property.AddrProp
|
class AddrProp(Property):
    """Address property (host + port)"""

    def pythonize(self, val):
        """Convert value into an address ip format::

        * If value is a list, try to take the last element
        * match ip address and port (if available)

        :param val: value to convert
        :type val:
        :return: address/port corresponding to value
        :rtype: dict
        :raise ValueError: if the value does not look like ``address[:port]``
        """
        val = unique_value(val)
        matches = re.match(r"^([^:]*)(?::(\d+))?$", val)
        if matches is None:
            # An explicit message makes configuration errors diagnosable;
            # the bare "raise ValueError" gave the operator no clue.
            raise ValueError("Not a valid address[:port] value: %s" % val)
        addr = {'address': matches.group(1)}
        if matches.group(2) is not None:
            addr['port'] = int(matches.group(2))
        return addr
|
class AddrProp(Property):
'''Address property (host + port)'''
def pythonize(self, val):
'''Convert value into a address ip format::
* If value is a list, try to take the last element
* match ip address and port (if available)
:param val: value to convert
:type val:
:return: address/port corresponding to value
:rtype: dict
'''
pass
| 2 | 2 | 21 | 4 | 9 | 8 | 3 | 0.9 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 4 | 24 | 5 | 10 | 4 | 8 | 9 | 10 | 4 | 8 | 3 | 2 | 1 | 3 |
4,072 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daterange.py
|
alignak.daterange.Timerange
|
class Timerange(AlignakObject):
    """Timerange class provides parsing facilities for time range declaration

    A time range is an ``HH:MM-HH:MM`` interval inside a day.
    """

    def __init__(self, entry=None, params=None, parsing=True):
        """Entry is like 00:00-24:00

        :param entry: time range entry
        :return: Timerange instance
        :rtype: object
        """
        # The original code called super().__init__ identically in both
        # branches; a single call followed by the early return is equivalent.
        super(Timerange, self).__init__(params, parsing=parsing)
        if not parsing:
            return
        if entry is not None:
            pattern = r'(\d\d):(\d\d)-(\d\d):(\d\d)'
            matches = re.match(pattern, entry)
            self.is_valid = matches is not None
            if self.is_valid:
                self.hstart, self.mstart, self.hend, self.mend = [int(g) for g in matches.groups()]
            else:
                # Bug fix: the former code stored the *string* "00:00" in each
                # of these integer fields. get_sec_from_morning() then computed
                # "00:00" * 3600 + "00:00" * 60 (string repetition and
                # concatenation) instead of raising or returning 0. Use a null
                # range of integer zeros instead; is_valid stays False.
                self.hstart = 0
                self.mstart = 0
                self.hend = 0
                self.mend = 0
        else:
            self.hstart = params["hstart"]
            self.mstart = params["mstart"]
            self.hend = params["hend"]
            self.mend = params["mend"]
            self.is_valid = params["is_valid"]

    def serialize(self, no_json=True, printing=False):
        """This function serialize into a simple dict object.
        It is used when transferring data to other daemons over the network (http)

        Here we directly return all attributes

        :return: json representation of a Timerange
        :rtype: dict
        """
        return {
            "hstart": self.hstart, "mstart": self.mstart,
            "hend": self.hend, "mend": self.mend,
            "is_valid": self.is_valid
        }

    def __str__(self):  # pragma: no cover
        return str(self.__dict__)

    def get_sec_from_morning(self):
        """Get Timerange start time in seconds (from midnight)

        :return: amount of seconds from midnight
        :rtype: int
        """
        return self.hstart * 3600 + self.mstart * 60

    def get_first_sec_out_from_morning(self):
        """Get the first second (from midnight) where we are out of the timerange

        :return: seconds from midnight where timerange is not effective
        :rtype: int
        """
        # If start at 0:0, the min out is the end
        if self.hstart == 0 and self.mstart == 0:
            return self.hend * 3600 + self.mend * 60
        return 0

    def is_time_valid(self, timestamp):
        """Check if time is valid for this Timerange

        If sec_from_morning is not provided, get the value.

        :param timestamp: time to check
        :type timestamp: int
        :return: True if time is valid (in interval), False otherwise
        :rtype: bool
        """
        sec_from_morning = get_sec_from_morning(timestamp)
        return (self.is_valid and
                self.hstart * 3600 + self.mstart * 60 <=
                sec_from_morning <=
                self.hend * 3600 + self.mend * 60)

    def is_correct(self):
        """Getter for is_valid attribute

        :return: True if Timerange is valid, False otherwise
        :rtype: bool
        """
        return self.is_valid
|
class Timerange(AlignakObject):
'''Timerange class provides parsing facilities for time range declaration
'''
def __init__(self, entry=None, params=None, parsing=True):
'''Entry is like 00:00-24:00
:param entry: time range entry
:return: Timerange instance
:rtype: object
'''
pass
def serialize(self, no_json=True, printing=False):
'''This function serialize into a simple dict object.
It is used when transferring data to other daemons over the network (http)
Here we directly return all attributes
:return: json representation of a Timerange
:rtype: dict
'''
pass
def __str__(self):
pass
def get_sec_from_morning(self):
'''Get Timerange start time in seconds (from midnight)
:return: amount of seconds from midnight
:rtype: int
'''
pass
def get_first_sec_out_from_morning(self):
'''Get the first second (from midnight) where we are out of the timerange
:return: seconds from midnight where timerange is not effective
:rtype: int
'''
pass
def is_time_valid(self, timestamp):
'''Check if time is valid for this Timerange
If sec_from_morning is not provided, get the value.
:param timestamp: time to check
:type timestamp: int
:return: True if time is valid (in interval), False otherwise
:rtype: bool
'''
pass
def is_correct(self):
'''Getter for is_valid attribute
:return: True if Timerange is valid, False otherwise
:rtype: bool
'''
pass
| 8 | 7 | 12 | 1 | 6 | 5 | 2 | 0.76 | 1 | 3 | 0 | 0 | 7 | 5 | 7 | 10 | 96 | 18 | 45 | 13 | 37 | 34 | 36 | 13 | 28 | 4 | 2 | 2 | 11 |
4,073 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daemons/reactionnerdaemon.py
|
alignak.daemons.reactionnerdaemon.Reactionner
|
class Reactionner(Satellite):
    """
    This class is an application that launches actions for the schedulers
    Actions can be:
       Notifications
       Event handlers

    When running the Reactionner will :
      Respond to pings from Arbiter
      Listen for new configurations from Arbiter

    The configuration consists of a list of Schedulers for which
    the Reactionner will launch actions for.
    """
    # A reactionner only launches actions (notifications, event handlers);
    # it never runs checks - the mirror image of a poller satellite.
    do_checks = False  # I do not do checks
    do_actions = True
    my_type = 'reactionner'

    # Extend the generic Satellite properties with the daemon type
    # and its default listening port.
    properties = Satellite.properties.copy()
    properties.update({
        'type':
            StringProp(default='reactionner'),
        'port':
            IntegerProp(default=7769)
    })

    def __init__(self, **kwargs):
        """Reactionner daemon initialisation

        :param kwargs: command line arguments
        """
        super(Reactionner, self).__init__(kwargs.get('daemon_name',
                                                     'Default-reactionner'), **kwargs)
|
class Reactionner(Satellite):
'''
This class is an application that launches actions for the schedulers
Actions can be:
Notifications
Event handlers
When running the Reactionner will :
Respond to pings from Arbiter
Listen for new configurations from Arbiter
The configuration consists of a list of Schedulers for which
the Reactionner will launch actions for.
'''
def __init__(self, **kwargs):
'''Reactionner daemon initialisation
:param kwargs: command line arguments
'''
pass
| 2 | 2 | 7 | 1 | 3 | 3 | 1 | 1.07 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 83 | 33 | 5 | 14 | 6 | 12 | 15 | 8 | 6 | 6 | 1 | 4 | 0 | 1 |
4,074 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daemons/schedulerdaemon.py
|
alignak.daemons.schedulerdaemon.Alignak
|
class Alignak(BaseSatellite):
# pylint: disable=too-many-instance-attributes
"""Scheduler class. Referenced as "app" in most Interface
"""
properties = BaseSatellite.properties.copy()
properties.update({
'type':
StringProp(default='scheduler'),
'port':
IntegerProp(default=7768)
})
    def __init__(self, **kwargs):
        """Scheduler daemon initialisation

        Builds the HTTP interface, the internal Scheduler object,
        the statistics counters and the satellite link dictionaries.

        :param kwargs: command line arguments
        """
        super(Alignak, self).__init__(kwargs.get('daemon_name', 'Default-scheduler'), **kwargs)

        # HTTP interface object exposed by this daemon
        self.http_interface = SchedulerInterface(self)
        # The real scheduling engine wrapped by this daemon
        self.sched = Scheduler(self)

        # stats part
        # --- copied from scheduler.py
        self.nb_pulled_checks = 0
        self.nb_pulled_actions = 0
        # self.nb_checks_send = 0

        self.nb_pushed_checks = 0
        self.nb_pushed_actions = 0

        self.nb_pulled_broks = 0
        # ---

        # And possible links for satellites
        self.brokers = {}
        self.pollers = {}
        self.reactionners = {}
        self.receivers = {}

        # This because it is the Satellite that has thes properties and I am a Satellite
        # todo: change this?
        # Broks are stored in each broker link, not locally
        # self.broks = []
        self.broks_lock = threading.RLock()

        # Modules are only loaded one time
        self.have_modules = False

        # Set to True once the first scheduling pass has been done
        self.first_scheduling = False
    def give_broks(self, broker_name):
        """Give broks for a specific broker

        Pops the matching broker link's broks (in creation-time order) and
        returns them; the provided broks are removed from the link's queue.

        :param broker_name: broker name to send broks
        :type broker_name: str
        :return: list of broks for this broker (the original docstring said
            dict, but the code builds and returns a list)
        :rtype: list[alignak.brok.Brok]
        """
        logger.debug("Broker %s requests my broks list", broker_name)
        res = []
        if not broker_name:
            return res

        for broker_link in list(self.brokers.values()):
            if broker_name == broker_link.name:
                # NOTE(review): the condition keeps broks whose
                # sent_to_externals flag is True - presumably set once the
                # external modules have processed the brok; the former comment
                # stated the opposite ("did not yet sent"). Confirm against
                # the code that sets sent_to_externals.
                for brok in sorted(broker_link.broks, key=lambda x: x.creation_time):
                    if getattr(brok, 'sent_to_externals', False):
                        res.append(brok)
                        # Mark as handed over so it is dropped from the queue below
                        brok.got = True
                        logger.debug("brok: %s", brok)
                # Keep only the broks that were not provided in this call
                broker_link.broks = [b for b in broker_link.broks if not getattr(b, 'got', False)]
                logger.debug("Providing %d broks to %s", len(res), broker_name)
                break
        else:
            # for/else: the loop completed without a break, so no broker
            # link matched the requested name
            logger.warning("Got a brok request from an unknown broker: %s", broker_name)

        return res
    def compensate_system_time_change(self, difference):  # pragma: no cover,
        # pylint: disable=too-many-branches
        # not with unit tests
        """Compensate a system time change of difference for all hosts/services/checks/notifs

        Shifts the program start time, then every host/service next check and every
        scheduled check/action `t_to_go`, re-validating the new times against the
        relevant check / notification timeperiods.

        :param difference: difference in seconds
        :type difference: int
        :return: None
        """
        super(Alignak, self).compensate_system_time_change(difference)

        # We only need to change some value
        self.program_start = max(0, self.program_start + difference)

        if not hasattr(self.sched, "conf"):
            # Race condition where time change before getting conf
            return

        # Then we compensate all host/services
        for host in self.sched.hosts:
            host.compensate_system_time_change(difference)
        for serv in self.sched.services:
            serv.compensate_system_time_change(difference)

        # Now all checks and actions
        for chk in list(self.sched.checks.values()):
            # Already launch checks should not be touch
            if chk.status == u'scheduled' and chk.t_to_go is not None:
                t_to_go = chk.t_to_go
                ref = self.sched.find_item_by_id(chk.ref)
                new_t = max(0, t_to_go + difference)
                timeperiod = self.sched.timeperiods[ref.check_period]
                if timeperiod is not None:
                    # But it's no so simple, we must match the timeperiod
                    new_t = timeperiod.get_next_valid_time_from_t(new_t)
                # But maybe no there is no more new value! Not good :(
                # Say as error, with error output
                if new_t is None:
                    chk.state = u'waitconsume'
                    chk.exit_status = 2
                    chk.output = '(Error: there is no available check time after time change!)'
                    chk.check_time = time.time()
                    chk.execution_time = 0
                else:
                    chk.t_to_go = new_t
                    ref.next_chk = new_t

        # Now all checks and actions
        for act in list(self.sched.actions.values()):
            # Already launch checks should not be touch
            if act.status == u'scheduled':
                t_to_go = act.t_to_go

                # Event handler do not have ref
                ref_id = getattr(act, 'ref', None)
                new_t = max(0, t_to_go + difference)

                # Notification should be check with notification_period
                if act.is_a == u'notification':
                    ref = self.sched.find_item_by_id(ref_id)
                    if ref.notification_period:
                        # But it's no so simple, we must match the timeperiod
                        notification_period = self.sched.timeperiods[ref.notification_period]
                        new_t = notification_period.get_next_valid_time_from_t(new_t)
                    # And got a creation_time variable too
                    act.creation_time += difference

                # But maybe no there is no more new value! Not good :(
                # Say as error, with error output
                if new_t is None:
                    act.state = 'waitconsume'
                    act.exit_status = 2
                    act.output = '(Error: there is no available check time after time change!)'
                    act.check_time = time.time()
                    act.execution_time = 0
                else:
                    act.t_to_go = new_t
def do_before_loop(self):
"""Stop the scheduling process"""
if self.sched:
self.sched.stop_scheduling()
    def do_loop_turn(self):
        """Scheduler loop turn

        Simply run the Alignak scheduler loop

        This is called when a configuration got received by the scheduler daemon. As of it,
        check if the first scheduling has been done... and manage this.

        :return: None
        """
        if not self.first_scheduling:
            # Ok, now all is initialized, we can make the initial broks
            logger.info("First scheduling launched")
            _t0 = time.time()
            # Program start brok
            self.sched.initial_program_status()
            # First scheduling
            self.sched.schedule()
            statsmgr.timer('first_scheduling', time.time() - _t0)
            logger.info("First scheduling done")

            # Connect to our passive satellites if needed
            for satellite in [s for s in list(self.pollers.values()) if s.passive]:
                if not self.daemon_connection_init(satellite):
                    logger.error("Passive satellite connection failed: %s", satellite)

            for satellite in [s for s in list(self.reactionners.values()) if s.passive]:
                if not self.daemon_connection_init(satellite):
                    logger.error("Passive satellite connection failed: %s", satellite)

            # Ticks are for recurrent function call like consume, del zombies etc
            self.sched.ticks = 0
            self.first_scheduling = True

        # Each loop turn, execute the daemon specific treatment...
        # only if the daemon has a configuration to manage
        if self.sched.pushed_conf:
            # If scheduling is not yet enabled, enable scheduling
            if not self.sched.must_schedule:
                self.sched.start_scheduling()
            self.sched.before_run()
            self.sched.run()
        else:
            logger.warning("#%d - No monitoring configuration to scheduler...",
                           self.loop_count)
def get_managed_configurations(self):
"""Get the configurations managed by this scheduler
The configuration managed by a scheduler is the self configuration got
by the scheduler during the dispatching.
:return: a dict of scheduler links with instance_id as key and
hash, push_flavor and configuration identifier as values
:rtype: dict
"""
# for scheduler_link in list(self.schedulers.values()):
# res[scheduler_link.instance_id] = {
# 'hash': scheduler_link.hash,
# 'push_flavor': scheduler_link.push_flavor,
# 'managed_conf_id': scheduler_link.managed_conf_id
# }
res = {}
if self.sched.pushed_conf and self.cur_conf and 'instance_id' in self.cur_conf:
res[self.cur_conf['instance_id']] = {
'hash': self.cur_conf['hash'],
'push_flavor': self.cur_conf['push_flavor'],
'managed_conf_id': self.cur_conf['managed_conf_id']
}
logger.debug("Get managed configuration: %s", res)
return res
    def setup_new_conf(self):
        # pylint: disable=too-many-statements, too-many-branches, too-many-locals
        """Setup new conf received for scheduler

        Runs the base satellite treatment, then un-serializes the monitored objects
        configuration part, rebuilds the satellite links (pollers, reactionners,
        brokers), loads the modules, gives the configuration to the inner Scheduler
        object and finally initializes the connections with all the satellites.

        :return: None
        """
        # Execute the base class treatment...
        super(Alignak, self).setup_new_conf()

        # ...then our own specific treatment!
        with self.conf_lock:
            # self_conf is our own configuration from the alignak environment
            # self_conf = self.cur_conf['self_conf']
            logger.debug("Got config: %s", self.cur_conf)
            if 'conf_part' not in self.cur_conf:
                self.cur_conf['conf_part'] = None
            conf_part = self.cur_conf['conf_part']

            # Ok now we can save the retention data
            if self.sched.pushed_conf is not None:
                self.sched.update_retention()

            # Get the monitored objects configuration
            t00 = time.time()
            received_conf_part = None
            try:
                received_conf_part = unserialize(conf_part)
                assert received_conf_part is not None
            except AssertionError as exp:
                # This to indicate that no configuration is managed by this scheduler...
                logger.warning("No managed configuration received from arbiter")
            except AlignakClassLookupException as exp:  # pragma: no cover
                # This to indicate that the new configuration is not managed...
                self.new_conf = {
                    "_status": "Cannot un-serialize configuration received from arbiter",
                    "_error": str(exp)
                }
                logger.error(self.new_conf)
                logger.error("Back trace of the error:\n%s", traceback.format_exc())
                return
            except Exception as exp:  # pylint: disable=broad-except
                # This to indicate that the new configuration is not managed...
                self.new_conf = {
                    "_status": "Cannot un-serialize configuration received from arbiter",
                    "_error": str(exp)
                }
                logger.error(self.new_conf)
                self.exit_on_exception(exp, str(self.new_conf))

            # if not received_conf_part:
            #     return

            logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs",
                        received_conf_part, t00, time.time() - t00)

            # Now we create our pollers, reactionners and brokers
            for link_type in ['pollers', 'reactionners', 'brokers']:
                if link_type not in self.cur_conf['satellites']:
                    logger.error("Missing %s in the configuration!", link_type)
                    continue

                my_satellites = getattr(self, link_type, {})
                received_satellites = self.cur_conf['satellites'][link_type]
                for link_uuid in received_satellites:
                    rs_conf = received_satellites[link_uuid]
                    logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
                                 rs_conf['type'], rs_conf['name'])

                    # Must look if we already had a configuration and save our broks
                    already_got = rs_conf['instance_id'] in my_satellites
                    broks = []
                    actions = {}
                    wait_homerun = {}
                    external_commands = {}
                    running_id = 0
                    if already_got:
                        logger.warning("I already got: %s", rs_conf['instance_id'])
                        # Save some information
                        running_id = my_satellites[link_uuid].running_id
                        (broks, actions,
                         wait_homerun, external_commands) = \
                            my_satellites[link_uuid].get_and_clear_context()
                        # Delete the former link
                        del my_satellites[link_uuid]

                    # My new satellite link...
                    new_link = SatelliteLink.get_a_satellite_link(link_type[:-1],
                                                                  rs_conf)
                    my_satellites[new_link.uuid] = new_link
                    logger.info("I got a new %s satellite: %s", link_type[:-1], new_link.name)

                    # Restore the saved context on the new link
                    new_link.running_id = running_id
                    new_link.external_commands = external_commands
                    new_link.broks = broks
                    new_link.wait_homerun = wait_homerun
                    new_link.actions = actions

                    # Replacing the satellite address and port by those defined in satellite_map
                    if new_link.name in self.cur_conf['override_conf'].get('satellite_map', {}):
                        override_conf = self.cur_conf['override_conf']
                        overriding = override_conf.get('satellite_map')[new_link.name]
                        logger.warning("Do not override the configuration for: %s, with: %s. "
                                       "Please check whether this is necessary!",
                                       new_link.name, overriding)

            # First mix conf and override_conf to have our definitive conf
            # NOTE(review): `self.cur_conf` is a dict here; `getattr` on a dict always
            # returns the default, so this loop looks like a no-op - confirm upstream.
            for prop in getattr(self.cur_conf, 'override_conf', []):
                logger.debug("Overriden: %s / %s ", prop, getattr(received_conf_part, prop, None))
                logger.debug("Overriding: %s / %s ", prop, self.cur_conf['override_conf'])
                setattr(received_conf_part, prop, self.cur_conf['override_conf'].get(prop, None))

            # Scheduler modules
            if not self.have_modules:
                try:
                    logger.debug("Modules configuration: %s", self.cur_conf['modules'])
                    self.modules = unserialize(self.cur_conf['modules'], no_json=True)
                except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                    logger.error('Cannot un-serialize modules configuration '
                                 'received from arbiter: %s', exp)
                if self.modules:
                    logger.info("I received some modules configuration")
                    self.have_modules = True

                    self.do_load_modules(self.modules)
                    # and start external modules too
                    self.modules_manager.start_external_instances()
                else:
                    logger.info("I do not have modules")

            if received_conf_part:
                logger.info("Loading configuration...")

                # Propagate the global parameters to the configuration items
                received_conf_part.explode_global_conf()

                # We give the configuration to our scheduler
                self.sched.reset()
                self.sched.load_conf(self.cur_conf['instance_id'],
                                     self.cur_conf['instance_name'],
                                     received_conf_part)

                # Once loaded, the scheduler has an inner pushed_conf object
                logger.info("Loaded: %s", self.sched.pushed_conf)

                # Update the scheduler ticks according to the daemon configuration
                self.sched.update_recurrent_works_tick(self)

                # We must update our pushed configuration macros with correct values
                # from the configuration parameters
                # self.sched.pushed_conf.fill_resource_macros_names_macros()

                # Creating the Macroresolver Class & unique instance
                m_solver = MacroResolver()
                m_solver.init(received_conf_part)

                # Now create the external commands manager
                # We are an applyer: our role is not to dispatch commands, but to apply them
                ecm = ExternalCommandManager(
                    received_conf_part, 'applyer', self.sched,
                    received_conf_part.accept_passive_unknown_check_results,
                    received_conf_part.log_external_commands)

                # Scheduler needs to know about this external command manager to use it if necessary
                self.sched.external_commands_manager = ecm

                # Ok now we can load the retention data
                self.sched.retention_load()

                # Log hosts/services initial states
                self.sched.log_initial_states()

            # Create brok new conf
            brok = Brok({'type': 'new_conf', 'data': {}})
            self.sched.add_brok(brok)

            # Initialize connection with all our satellites
            logger.info("Initializing connection with my satellites:")
            my_satellites = self.get_links_of_type(s_type='')
            for satellite in list(my_satellites.values()):
                logger.info("- : %s/%s", satellite.type, satellite.name)
                if not self.daemon_connection_init(satellite):
                    logger.error("Satellite connection failed: %s", satellite)

        if received_conf_part:
            # Enable the scheduling process
            logger.info("Loaded: %s", self.sched.pushed_conf)
            self.sched.start_scheduling()

        # Now I have a configuration!
        self.have_conf = True
def clean_previous_run(self):
"""Clean variables from previous configuration
:return: None
"""
# Execute the base class treatment...
super(Alignak, self).clean_previous_run()
# Clean all lists
self.pollers.clear()
self.reactionners.clear()
self.brokers.clear()
    def get_daemon_stats(self, details=False):
        """Increase the stats provided by the Daemon base class

        Adds the satellites counters and merges the inner scheduler statistics.

        :param details: provide detailed statistics
        :type details: bool
        :return: stats dictionary
        :rtype: dict
        """
        # Call the base Daemon one
        res = super(Alignak, self).get_daemon_stats(details=details)

        res.update({'name': self.name, 'type': self.type, 'monitored_objects': {}})

        counters = res['counters']

        # Satellites counters
        counters['brokers'] = len(self.brokers)
        counters['pollers'] = len(self.pollers)
        counters['reactionners'] = len(self.reactionners)
        counters['receivers'] = len(self.receivers)

        if not self.sched:
            # No inner scheduler yet: only the daemon statistics are available
            return res

        # # Hosts/services problems counters
        # m_solver = MacroResolver()
        # counters['hosts_problems'] = m_solver._get_total_host_problems()
        # counters['hosts_unhandled_problems'] = m_solver._get_total_host_problems_unhandled()
        # counters['services_problems'] = m_solver._get_total_service_problems()
        # counters['services_unhandled_problems'] = m_solver._get_total_service_problems_unhandled()

        # Get statistics from the scheduler and merge its counters into ours
        scheduler_stats = self.sched.get_scheduler_stats(details=details)
        res['counters'].update(scheduler_stats['counters'])
        scheduler_stats.pop('counters')
        res.update(scheduler_stats)

        return res
def get_monitoring_problems(self):
"""Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
"""
res = {}
if not self.sched:
return res
# Get statistics from the scheduler
scheduler_stats = self.sched.get_scheduler_stats(details=True)
if 'livesynthesis' in scheduler_stats:
res['livesynthesis'] = scheduler_stats['livesynthesis']
if 'problems' in scheduler_stats:
res['problems'] = scheduler_stats['problems']
return res
    def main(self):
        """Main function for Scheduler, launch after the init::

        * Init daemon
        * Load module manager
        * Launch main loop
        * Catch any Exception that occurs

        :return: None
        """
        try:
            # Start the daemon mode
            if not self.do_daemon_init_and_start():
                self.exit_on_error(message="Daemon initialization error", exit_code=3)

            # We wait for initial conf
            self.wait_for_initial_conf()
            if self.new_conf:
                # Setup the received configuration
                self.setup_new_conf()

                # Now the main loop
                self.do_main_loop()
                logger.info("Exited from the main loop.")

                # On main loop exit, call the scheduler after run process
                self.sched.after_run()

            self.request_stop()
        except Exception:  # pragma: no cover, this should never happen indeed ;)
            # Log and re-raise: the daemon must not die silently
            self.exit_on_exception(traceback.format_exc())
            raise
|
class Alignak(BaseSatellite):
'''Scheduler class. Referenced as "app" in most Interface
'''
def __init__(self, **kwargs):
'''Scheduler daemon initialisation
:param kwargs: command line arguments
'''
pass
def give_broks(self, broker_name):
'''Give broks for a specific broker
:param broker_name: broker name to send broks
:type broker_name: str
:return: dict of brok for this broker
:rtype: dict[alignak.brok.Brok]
'''
pass
def compensate_system_time_change(self, difference):
'''Compensate a system time change of difference for all hosts/services/checks/notifs
:param difference: difference in seconds
:type difference: int
:return: None
'''
pass
def do_before_loop(self):
'''Stop the scheduling process'''
pass
def do_loop_turn(self):
'''Scheduler loop turn
Simply run the Alignak scheduler loop
This is called when a configuration got received by the scheduler daemon. As of it,
check if the first scheduling has been done... and manage this.
:return: None
'''
pass
def get_managed_configurations(self):
'''Get the configurations managed by this scheduler
The configuration managed by a scheduler is the self configuration got
by the scheduler during the dispatching.
:return: a dict of scheduler links with instance_id as key and
hash, push_flavor and configuration identifier as values
:rtype: dict
'''
pass
def setup_new_conf(self):
'''Setup new conf received for scheduler
:return: None
'''
pass
def clean_previous_run(self):
'''Clean variables from previous configuration
:return: None
'''
pass
def get_daemon_stats(self, details=False):
'''Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
'''
pass
def get_monitoring_problems(self):
'''Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
'''
pass
def main(self):
'''Main function for Scheduler, launch after the init::
* Init daemon
* Load module manager
* Launch main loop
* Catch any Exception that occurs
:return: None
'''
pass
| 12 | 12 | 46 | 7 | 25 | 14 | 6 | 0.54 | 1 | 12 | 7 | 0 | 11 | 18 | 11 | 72 | 530 | 93 | 287 | 75 | 275 | 156 | 254 | 74 | 242 | 19 | 3 | 4 | 62 |
4,075 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daterange.py
|
alignak.daterange.CalendarDaterange
|
class CalendarDaterange(Daterange):
    """CalendarDaterange is for calendar entry (YYYY-MM-DD - YYYY-MM-DD)
    """

    def get_start_and_end_time(self, ref=None):
        """Specific function to get start time and end time for CalendarDaterange

        :param ref: time in seconds
        :type ref: int
        :return: tuple with start and end time
        :rtype: tuple (int, int)
        """
        period_start = get_start_of_day(self.syear, int(self.smon), self.smday)
        period_end = get_end_of_day(self.eyear, int(self.emon), self.emday)
        return (period_start, period_end)
|
class CalendarDaterange(Daterange):
'''CalendarDaterange is for calendar entry (YYYY-MM-DD - YYYY-MM-DD)
'''
def get_start_and_end_time(self, ref=None):
'''Specific function to get start time and end time for CalendarDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
'''
pass
| 2 | 2 | 10 | 1 | 3 | 6 | 1 | 2 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 26 | 14 | 2 | 4 | 2 | 2 | 8 | 3 | 2 | 1 | 1 | 4 | 0 | 1 |
4,076 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daterange.py
|
alignak.daterange.MonthDateDaterange
|
class MonthDateDaterange(Daterange):
    """MonthDateDaterange is for month and day entry (month DD - month DD)
    """

    def get_start_and_end_time(self, ref=None):
        """Specific function to get start time and end time for MonthDateDaterange

        :param ref: time in seconds
        :type ref: int
        :return: tuple with start and end time
        :rtype: tuple (int, int)
        """
        now = time.localtime(ref)
        if self.syear == 0:
            self.syear = now.tm_year
        day_start = find_day_by_offset(self.syear, self.smon, self.smday)
        start_time = get_start_of_day(self.syear, self.smon, day_start)

        if self.eyear == 0:
            self.eyear = now.tm_year
        day_end = find_day_by_offset(self.eyear, self.emon, self.emday)
        end_time = get_end_of_day(self.eyear, self.emon, day_end)

        now_epoch = time.mktime(now)
        if start_time > end_time:  # the period is between years
            if now_epoch > end_time:
                # check for next year
                day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
                end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
            else:
                # it is just that the start was the last year
                # Bug fix: the start day must be computed from the start day
                # offset (smday) - it formerly used the end day offset (emday),
                # inconsistently with every other branch of this method.
                day_start = find_day_by_offset(self.syear - 1, self.smon, self.smday)
                start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
        else:
            if now_epoch > end_time:
                # just have to check for next year if necessary
                day_start = find_day_by_offset(self.syear + 1, self.smon, self.smday)
                start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
                day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
                end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)

        return (start_time, end_time)
|
class MonthDateDaterange(Daterange):
'''MonthDateDaterange is for month and day entry (month DD - month DD)
'''
def get_start_and_end_time(self, ref=None):
'''Specific function to get start time and end time for MonthDateDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
'''
pass
| 2 | 2 | 38 | 4 | 25 | 10 | 6 | 0.46 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 26 | 42 | 5 | 26 | 10 | 24 | 12 | 24 | 10 | 22 | 6 | 4 | 2 | 6 |
4,077 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daterange.py
|
alignak.daterange.MonthDayDaterange
|
class MonthDayDaterange(Daterange):
    """MonthDayDaterange is for month week day entry (day DD - DD)
    """

    def get_start_and_end_time(self, ref=None):
        """Specific function to get start time and end time for MonthDayDaterange

        The month is taken from the reference time; when the computed period is
        already over (or wraps), the month indexes (and possibly the years) are
        shifted to the previous/next month.

        :param ref: time in seconds
        :type ref: int
        :return: tuple with start and end time
        :rtype: tuple (int, int)
        """
        now = time.localtime(ref)
        if self.syear == 0:
            self.syear = now.tm_year
        month_start_id = now.tm_mon
        day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)

        if self.eyear == 0:
            self.eyear = now.tm_year
        month_end_id = now.tm_mon
        day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
        end_time = get_end_of_day(self.eyear, month_end_id, day_end)

        now_epoch = time.mktime(now)

        if start_time > end_time:
            # The period wraps a month boundary: the start is in the previous month
            month_start_id -= 1
            if month_start_id < 1:
                # ...and possibly in the previous year (December)
                month_start_id = 12
                self.syear -= 1
            day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
            start_time = get_start_of_day(self.syear, month_start_id, day_start)

        if end_time < now_epoch:
            # The period is already over: shift both bounds to the next month,
            # rolling over to January of the next year when needed
            month_end_id += 1
            month_start_id += 1
            if month_end_id > 12:
                month_end_id = 1
                self.eyear += 1
            if month_start_id > 12:
                month_start_id = 1
                self.syear += 1

            # For the start
            day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
            start_time = get_start_of_day(self.syear, month_start_id, day_start)

            # For the end
            day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
            end_time = get_end_of_day(self.eyear, month_end_id, day_end)

        return (start_time, end_time)
|
class MonthDayDaterange(Daterange):
'''MonthDayDaterange is for month week day entry (day DD - DD)
'''
def get_start_and_end_time(self, ref=None):
'''Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
'''
pass
| 2 | 2 | 50 | 8 | 34 | 8 | 8 | 0.29 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 26 | 54 | 9 | 35 | 12 | 33 | 10 | 35 | 12 | 33 | 8 | 4 | 2 | 8 |
4,078 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daterange.py
|
alignak.daterange.MonthWeekDayDaterange
|
class MonthWeekDayDaterange(Daterange):
    """MonthWeekDayDaterange is for month week day entry (weekday DD month - weekday DD month)
    """

    def is_correct(self):
        """Check if the Daterange is correct : weekdays are valid

        Bug fix: each weekday is now checked independently so that an invalid
        `swday` no longer makes the `ewday` error be logged for a valid `ewday`
        (formerly the second check reused the already-False `valid` flag).
        The return value is unchanged: True iff both weekdays are in 0..6.

        :return: True if weekdays are valid, False otherwise
        :rtype: bool
        """
        valid = True
        if self.swday not in range(7):
            logger.error("Error: %s is not a valid day", self.swday)
            valid = False
        if self.ewday not in range(7):
            logger.error("Error: %s is not a valid day", self.ewday)
            valid = False
        return valid

    def get_start_and_end_time(self, ref=None):
        """Specific function to get start time and end time for MonthWeekDayDaterange

        :param ref: time in seconds
        :type ref: int | None
        :return: tuple with start and end time
        :rtype: tuple
        """
        now = time.localtime(ref)

        if self.syear == 0:
            self.syear = now.tm_year
        day_start = find_day_by_weekday_offset(self.syear, self.smon, self.swday, self.swday_offset)
        start_time = get_start_of_day(self.syear, self.smon, day_start)

        if self.eyear == 0:
            self.eyear = now.tm_year
        day_end = find_day_by_weekday_offset(self.eyear, self.emon, self.ewday, self.ewday_offset)
        end_time = get_end_of_day(self.eyear, self.emon, day_end)

        now_epoch = time.mktime(now)

        if start_time > end_time:  # the period is between years
            if now_epoch > end_time:  # check for next year
                day_end = find_day_by_weekday_offset(self.eyear + 1,
                                                     self.emon, self.ewday, self.ewday_offset)
                end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
            else:
                # it s just that the start was the last year
                day_start = find_day_by_weekday_offset(self.syear - 1,
                                                       self.smon, self.swday, self.swday_offset)
                start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
        else:
            if now_epoch > end_time:
                # just have to check for next year if necessary
                day_start = find_day_by_weekday_offset(self.syear + 1,
                                                       self.smon, self.swday, self.swday_offset)
                start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
                day_end = find_day_by_weekday_offset(self.eyear + 1,
                                                     self.emon, self.ewday, self.ewday_offset)
                end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)

        return (start_time, end_time)
|
class MonthWeekDayDaterange(Daterange):
'''MonthWeekDayDaterange is for month week day entry (weekday DD month - weekday DD month)
'''
def is_correct(self):
'''Check if the Daterange is correct : weekdays are valid
:return: True if weekdays are valid, False otherwise
:rtype: bool
'''
pass
def get_start_and_end_time(self, ref=None):
'''Specific function to get start time and end time for MonthWeekDayDaterange
:param ref: time in seconds
:type ref: int | None
:return: tuple with start and end time
:rtype: tuple
'''
pass
| 3 | 3 | 29 | 4 | 19 | 7 | 5 | 0.41 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 27 | 64 | 11 | 39 | 14 | 36 | 16 | 33 | 12 | 30 | 6 | 4 | 2 | 9 |
4,079 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
|
tests.test_aa_properties_default.TestHostdependency
|
class TestHostdependency(PropertiesTester, AlignakTest):
    """Check the default values of the Hostdependency item properties."""

    unused_props = []

    without_default = ['dependent_host_name', 'host_name']

    properties = {
        'imported_from': 'alignak-self',
        'use': [],
        'register': True,
        'definition_order': 100,
        'name': '',
        'dependent_hostgroup_name': '',
        'hostgroup_name': '',
        'inherits_parent': False,
        'execution_failure_criteria': ['n'],
        'notification_failure_criteria': ['n'],
        'dependency_period': '',
    }

    def setUp(self):
        super(TestHostdependency, self).setUp()

        from alignak.objects.hostdependency import Hostdependency
        self.item = Hostdependency({}, parsing=True)
|
class TestHostdependency(PropertiesTester, AlignakTest):
def setUp(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 1 | 1 | 60 | 24 | 4 | 20 | 7 | 17 | 0 | 8 | 7 | 5 | 1 | 2 | 0 | 1 |
4,080 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/daemons/receiverdaemon.py
|
alignak.daemons.receiverdaemon.Receiver
|
class Receiver(Satellite):
"""Receiver class.
The receiver daemon is the satellite that is in charge to "listen" on the external
World. Thanks to extra modules, it will listen to:
- NSCA messages
- external comands
- HTTP passive checks
-...
"""
my_type = 'receiver'
properties = Satellite.properties.copy()
properties.update({
'type':
StringProp(default='receiver'),
'port':
IntegerProp(default=7773)
})
    def __init__(self, **kwargs):
        """Receiver daemon initialisation

        :param kwargs: command line arguments
        """
        super(Receiver, self).__init__(kwargs.get('daemon_name', 'Default-receiver'), **kwargs)

        # Our schedulers and arbiters are initialized in the base class

        # Our related daemons
        # self.pollers = {}
        # self.reactionners = {}

        # Modules are load one time
        self.have_modules = False

        # Now an external commands manager and a list for the external_commands
        self.external_commands_manager = None

        # and the unprocessed one, a buffer
        self.unprocessed_external_commands = []

        self.accept_passive_unknown_check_results = False

        self.http_interface = GenericInterface(self)
def add(self, elt):
"""Generic function to add objects to the daemon internal lists.
Manage Broks, External commands
:param elt: object to add
:type elt: alignak.AlignakObject
:return: None
"""
# external commands may be received as a dictionary when pushed from the WebUI
if isinstance(elt, dict) and 'my_type' in elt and elt['my_type'] == "externalcommand":
if 'cmd_line' not in elt:
logger.debug("Received a badly formatted external command: %s. "
"No cmd_line!", elt)
return
logger.debug("Received a dictionary external command: %s", elt)
if 'creation_timestamp' not in elt:
elt['creation_timestamp'] = None
elt = ExternalCommand(elt['cmd_line'], elt['creation_timestamp'])
if isinstance(elt, Brok):
# For brok, we tag the brok with our instance_id
elt.instance_id = self.instance_id
if elt.type == 'monitoring_log':
# The brok is a monitoring event
with self.events_lock:
self.events.append(elt)
statsmgr.counter('events', 1)
# Also add to our broks
with self.broks_lock:
self.broks.append(elt)
statsmgr.counter('broks.added', 1)
elif isinstance(elt, ExternalCommand):
logger.debug("Queuing an external command: %s", str(ExternalCommand.__dict__))
self.unprocessed_external_commands.append(elt)
statsmgr.counter('external-commands.added', 1)
    def setup_new_conf(self):
        """Receiver custom setup_new_conf method

        This function calls the base satellite treatment and manages the configuration needed
        for a receiver daemon:
        - get and configure its satellites
        - configure the modules

        :return: None
        """
        # Execute the base class treatment...
        super(Receiver, self).setup_new_conf()

        # ...then our own specific treatment!
        with self.conf_lock:
            # self_conf is our own configuration from the alignak environment
            # self_conf = self.cur_conf['self_conf']
            logger.debug("Got config: %s", self.cur_conf)

            # Configure and start our modules
            if not self.have_modules:
                try:
                    self.modules = unserialize(self.cur_conf['modules'], no_json=True)
                except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                    logger.error('Cannot un-serialize modules configuration '
                                 'received from arbiter: %s', exp)
                if self.modules:
                    logger.info("I received some modules configuration")
                    self.have_modules = True

                    self.do_load_modules(self.modules)
                    # and start external modules too
                    self.modules_manager.start_external_instances()
                else:
                    logger.info("I do not have modules")

            # Now create the external commands manager
            # We are a receiver: our role is to get and dispatch commands to the schedulers
            global_conf = self.cur_conf.get('global_conf', None)
            if not global_conf:
                # Fall back on safe defaults when the arbiter did not provide any
                logger.error("Received a configuration without any global_conf! "
                             "This may hide a configuration problem with the "
                             "realms and the manage_sub_realms of the satellites!")
                global_conf = {
                    'accept_passive_unknown_check_results': False,
                    'log_external_commands': True
                }
            self.external_commands_manager = \
                ExternalCommandManager(None, 'receiver', self,
                                       global_conf.get(
                                           'accept_passive_unknown_check_results', False),
                                       global_conf.get(
                                           'log_external_commands', False))

            # Initialize connection with all our satellites
            logger.info("Initializing connection with my satellites:")
            my_satellites = self.get_links_of_type(s_type='')
            for satellite in list(my_satellites.values()):
                logger.info("- : %s/%s", satellite.type, satellite.name)
                if not self.daemon_connection_init(satellite):
                    logger.error("Satellite connection failed: %s", satellite)

        # Now I have a configuration!
        self.have_conf = True
def get_external_commands_from_arbiters(self):
"""Get external commands from our arbiters
As of now, only the arbiter are requested to provide their external commands that
the receiver will push to all the known schedulers to make them being executed.
:return: None
"""
for arbiter_link_uuid in self.arbiters:
link = self.arbiters[arbiter_link_uuid]
if not link.active:
logger.debug("The arbiter '%s' is not active, it is not possible to get "
"its external commands!", link.name)
continue
try:
logger.debug("Getting external commands from: %s", link.name)
external_commands = link.get_external_commands()
if external_commands:
logger.debug("Got %d commands from: %s", len(external_commands), link.name)
else:
# Simple protection against None value
external_commands = []
for external_command in external_commands:
self.add(external_command)
except LinkError:
logger.warning("Arbiter connection failed, I could not get external commands!")
except Exception as exp: # pylint: disable=broad-except
logger.error("Arbiter connection failed, I could not get external commands!")
logger.exception("Exception: %s", exp)
def push_external_commands_to_schedulers(self):
"""Push received external commands to the schedulers
:return: None
"""
if not self.unprocessed_external_commands:
return
# Those are the global external commands
commands_to_process = self.unprocessed_external_commands
self.unprocessed_external_commands = []
logger.debug("Commands: %s", commands_to_process)
# Now get all external commands and put them into the good schedulers
logger.debug("Commands to process: %d commands", len(commands_to_process))
for ext_cmd in commands_to_process:
cmd = self.external_commands_manager.resolve_command(ext_cmd)
logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
if cmd and cmd['global']:
# Send global command to all our schedulers
for scheduler_link_uuid in self.schedulers:
self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd)
# Now for all active schedulers, send the commands
count_pushed_commands = 0
count_failed_commands = 0
for scheduler_link_uuid in self.schedulers:
link = self.schedulers[scheduler_link_uuid]
if not link.active:
logger.debug("The scheduler '%s' is not active, it is not possible to push "
"external commands to its connection!", link.name)
continue
# If there are some commands for this scheduler...
commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
if not commands:
logger.debug("The scheduler '%s' has no commands.", link.name)
continue
logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
sent = []
try:
sent = link.push_external_commands(commands)
except LinkError:
logger.warning("Scheduler connection failed, I could not push external commands!")
# Whether we sent the commands or not, clean the scheduler list
link.pushed_commands = []
# If we didn't sent them, add the commands to the arbiter list
if sent:
statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
count_pushed_commands = count_pushed_commands + len(commands)
else:
count_failed_commands = count_failed_commands + len(commands)
statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
# Kepp the not sent commands... for a next try
self.external_commands.extend(commands)
statsmgr.gauge('external-commands.pushed.all', count_pushed_commands)
statsmgr.gauge('external-commands.failed.all', count_failed_commands)
def do_loop_turn(self):
"""Receiver daemon main loop
:return: None
"""
# Begin to clean modules
self.check_and_del_zombie_modules()
# Maybe the arbiter pushed a new configuration...
if self.watch_for_new_conf(timeout=0.05):
logger.info("I got a new configuration...")
# Manage the new configuration
self.setup_new_conf()
# Maybe external modules raised 'objects'
# we should get them
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer('core.get-objects-from-queues', time.time() - _t0)
# Get external commands from the arbiters...
_t0 = time.time()
self.get_external_commands_from_arbiters()
statsmgr.timer('external-commands.got.time', time.time() - _t0)
statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands))
_t0 = time.time()
self.push_external_commands_to_schedulers()
statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
# Say to modules it's a new tick :)
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
def get_daemon_stats(self, details=False):
"""Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
"""
# Call the base Daemon one
res = super(Receiver, self).get_daemon_stats(details=details)
res.update({'name': self.name, 'type': self.type})
counters = res['counters']
counters['external-commands'] = len(self.external_commands)
counters['external-commands-unprocessed'] = len(self.unprocessed_external_commands)
return res
def main(self):
"""Main receiver function
Init daemon and loop forever
:return: None
"""
try:
# Start the daemon mode
if not self.do_daemon_init_and_start():
self.exit_on_error(message="Daemon initialization error", exit_code=3)
# We wait for initial conf
self.wait_for_initial_conf()
if self.new_conf:
# Setup the received configuration
self.setup_new_conf()
# Now the main loop
self.do_main_loop()
logger.info("Exited from the main loop.")
self.request_stop()
except Exception: # pragma: no cover, this should never happen indeed ;)
self.exit_on_exception(traceback.format_exc())
raise
|
class Receiver(Satellite):
'''Receiver class.
The receiver daemon is the satellite that is in charge to "listen" on the external
World. Thanks to extra modules, it will listen to:
- NSCA messages
- external comands
- HTTP passive checks
-...
'''
def __init__(self, **kwargs):
'''Receiver daemon initialisation
:param kwargs: command line arguments
'''
pass
def add(self, elt):
'''Generic function to add objects to the daemon internal lists.
Manage Broks, External commands
:param elt: object to add
:type elt: alignak.AlignakObject
:return: None
'''
pass
def setup_new_conf(self):
'''Receiver custom setup_new_conf method
This function calls the base satellite treatment and manages the configuration needed
for a receiver daemon:
- get and configure its satellites
- configure the modules
:return: None
'''
pass
def get_external_commands_from_arbiters(self):
'''Get external commands from our arbiters
As of now, only the arbiter are requested to provide their external commands that
the receiver will push to all the known schedulers to make them being executed.
:return: None
'''
pass
def push_external_commands_to_schedulers(self):
'''Push received external commands to the schedulers
:return: None
'''
pass
def do_loop_turn(self):
'''Receiver daemon main loop
:return: None
'''
pass
def get_daemon_stats(self, details=False):
'''Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
'''
pass
def main(self):
'''Main receiver function
Init daemon and loop forever
:return: None
'''
pass
| 9 | 9 | 37 | 6 | 21 | 10 | 5 | 0.49 | 1 | 11 | 6 | 0 | 8 | 7 | 8 | 90 | 320 | 57 | 178 | 39 | 169 | 88 | 155 | 37 | 146 | 10 | 4 | 3 | 39 |
4,081 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/checkmodulation.py
|
alignak.objects.checkmodulation.CheckModulations
|
class CheckModulations(CommandCallItems):
"""CheckModulations class allowed to handle easily several CheckModulation objects
"""
inner_class = CheckModulation
def linkify(self, timeperiods, commands):
"""Replace check_period by real Timeperiod object into each CheckModulation
Replace check_command by real Command object into each CheckModulation
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link to
:type commands: alignak.objects.command.Commands
:return: None
"""
self.linkify_with_timeperiods(timeperiods, 'check_period')
self.linkify_with_commands(commands, 'check_command')
|
class CheckModulations(CommandCallItems):
'''CheckModulations class allowed to handle easily several CheckModulation objects
'''
def linkify(self, timeperiods, commands):
'''Replace check_period by real Timeperiod object into each CheckModulation
Replace check_command by real Command object into each CheckModulation
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link to
:type commands: alignak.objects.command.Commands
:return: None
'''
pass
| 2 | 2 | 12 | 1 | 3 | 8 | 1 | 2 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 48 | 18 | 3 | 5 | 3 | 3 | 10 | 5 | 3 | 3 | 1 | 3 | 0 | 1 |
4,082 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/command.py
|
alignak.objects.command.Command
|
class Command(Item):
"""
Class to manage a command
A command is an external command that a poller module runs to
check if something is ok or not
"""
__metaclass__ = AutoSlots
my_type = "command"
my_name_property = "%s_name" % my_type
properties = Item.properties.copy()
properties.update({
'command_name':
StringProp(fill_brok=[FULL_STATUS]),
'command_line':
StringProp(fill_brok=[FULL_STATUS]),
'poller_tag':
StringProp(default=u'None'),
'reactionner_tag':
StringProp(default=u'None'),
'module_type':
StringProp(default=u'fork'),
'timeout':
IntegerProp(default=-1),
'enable_environment_macros':
BoolProp(default=False),
})
def __init__(self, params, parsing=True):
super(Command, self).__init__(params, parsing=parsing)
self.fill_default()
if getattr(self, 'command_line', '').startswith('_'):
# For an internal command...
self.module_type = u'internal'
def __str__(self): # pragma: no cover
return "<Command %s, command line: %s />" % \
(self.get_name(), getattr(self, 'command_line', 'Unset'))
__repr__ = __str__
def fill_data_brok_from(self, data, brok_type):
"""
Add properties to data if fill_brok of these class properties
is same as brok_type
:param data: dictionnary of this command
:type data: dict
:param brok_type: type of brok
:type brok_type: str
:return: None
"""
cls = self.__class__
# Now config properties
for prop, entry in list(cls.properties.items()):
# Is this property intended for broking?
# if 'fill_brok' in entry[prop]:
if brok_type in entry.fill_brok:
if hasattr(self, prop):
data[prop] = getattr(self, prop)
# elif 'default' in entry[prop]:
# data[prop] = entry.default
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# _internal_host_check is for having an host check result
# without running a check plugin
if self.command_name.startswith('_internal_host_check'):
# Command line may contain: [state_id][;output]
parameters = self.command_line.split(';')
if len(parameters) < 2:
self.command_name = "_internal_host_check;0;Host assumed to be UP"
self.add_warning("has no defined state nor output. Changed to %s"
% self.command_name)
elif len(parameters) < 3:
state = 3
try:
state = int(parameters[1])
except ValueError:
self.add_warning("required a non integer state: %s. Using 3."
% parameters[1])
if state > 4:
self.add_warning("required an impossible state: %d. Using 3." % state)
output = {0: "UP", 1: "DOWN", 2: "DOWN", 3: "UNKNOWN", 4: "UNREACHABLE", }[state]
self.command_name = "_internal_host_check;Host assumed to be %s" % output
self.add_warning("has no defined output. Changed to %s" % self.command_name)
elif len(parameters) > 3:
self.command_name = "%s;%s;%s" % (parameters[0], parameters[1], parameters[2])
self.add_warning("has too many parameters. Changed to %s" % self.command_name)
return super(Command, self).is_correct() and state
|
class Command(Item):
'''
Class to manage a command
A command is an external command that a poller module runs to
check if something is ok or not
'''
def __init__(self, params, parsing=True):
pass
def __str__(self):
pass
def fill_data_brok_from(self, data, brok_type):
'''
Add properties to data if fill_brok of these class properties
is same as brok_type
:param data: dictionnary of this command
:type data: dict
:param brok_type: type of brok
:type brok_type: str
:return: None
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
| 5 | 3 | 18 | 3 | 10 | 6 | 4 | 0.5 | 1 | 4 | 0 | 0 | 4 | 2 | 4 | 38 | 106 | 17 | 60 | 17 | 55 | 30 | 40 | 17 | 35 | 7 | 3 | 3 | 14 |
4,083 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/command.py
|
alignak.objects.command.Commands
|
class Commands(Items):
"""
Class to manage all commands
A command is an external command the poller module run to
see if something is ok or not
"""
inner_class = Command
|
class Commands(Items):
'''
Class to manage all commands
A command is an external command the poller module run to
see if something is ok or not
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 7 | 0 | 2 | 2 | 1 | 5 | 2 | 2 | 1 | 0 | 2 | 0 | 0 |
4,084 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/itemgroup.py
|
alignak.objects.itemgroup.Itemgroups
|
class Itemgroups(Items):
"""
Class to manage list of groups of items
An itemgroup is used to group items groups
"""
def add(self, item_group):
"""
Add an item (Itemgroup) to the known groups
:param item_group: an item
:type item_group: alignak.objects.itemgroup.Itemgroup
:return: None
"""
self.add_item(item_group)
|
class Itemgroups(Items):
'''
Class to manage list of groups of items
An itemgroup is used to group items groups
'''
def add(self, item_group):
'''
Add an item (Itemgroup) to the known groups
:param item_group: an item
:type item_group: alignak.objects.itemgroup.Itemgroup
:return: None
'''
pass
| 2 | 2 | 9 | 1 | 2 | 6 | 1 | 3.33 | 1 | 0 | 0 | 4 | 1 | 0 | 1 | 46 | 15 | 2 | 3 | 2 | 1 | 10 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
4,085 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/macromodulation.py
|
alignak.objects.macromodulation.MacroModulation
|
class MacroModulation(Item):
"""
Class to manage a MacroModulation
A MacroModulation is defined to change critical and warning level in some periods (like the
night)
"""
my_type = 'macromodulation'
my_name_property = "%s_name" % my_type
properties = Item.properties.copy()
properties.update({
'macromodulation_name':
StringProp(fill_brok=[FULL_STATUS]),
'modulation_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=[FULL_STATUS]),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'customs':
DictProp(default={}, fill_brok=[FULL_STATUS]),
})
special_properties = ('modulation_period',)
macros = {}
def is_active(self, timperiods):
"""
Know if this macro is active for this correct period
:return: True is we are in the period, otherwise False
:rtype: bool
"""
now = int(time.time())
timperiod = timperiods[self.modulation_period]
if not timperiod or timperiod.is_time_valid(now):
return True
return False
def is_correct(self):
"""
Check if this object configuration is correct ::
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Ok just put None as modulation_period, means 24x7
if not hasattr(self, 'modulation_period'):
self.modulation_period = None
if not hasattr(self, 'customs') or not self.customs:
self.add_error("[macromodulation::%s] contains no macro definition"
% self.get_name())
state = False
return super(MacroModulation, self).is_correct() and state
|
class MacroModulation(Item):
'''
Class to manage a MacroModulation
A MacroModulation is defined to change critical and warning level in some periods (like the
night)
'''
def is_active(self, timperiods):
'''
Know if this macro is active for this correct period
:return: True is we are in the period, otherwise False
:rtype: bool
'''
pass
def is_correct(self):
'''
Check if this object configuration is correct ::
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
| 3 | 3 | 17 | 3 | 8 | 6 | 3 | 0.53 | 1 | 2 | 0 | 0 | 2 | 1 | 2 | 36 | 61 | 12 | 32 | 13 | 29 | 17 | 23 | 13 | 20 | 3 | 3 | 1 | 5 |
4,086 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/macromodulation.py
|
alignak.objects.macromodulation.MacroModulations
|
class MacroModulations(Items):
"""
Class to manage all MacroModulation
"""
name_property = "macromodulation_name"
inner_class = MacroModulation
def linkify(self, timeperiods):
"""
Link with timeperiod
:param timeperiods: Timeperiod object
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
self.linkify_with_timeperiods(timeperiods, 'modulation_period')
|
class MacroModulations(Items):
'''
Class to manage all MacroModulation
'''
def linkify(self, timeperiods):
'''
Link with timeperiod
:param timeperiods: Timeperiod object
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
'''
pass
| 2 | 2 | 9 | 1 | 2 | 6 | 1 | 1.8 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 46 | 16 | 2 | 5 | 4 | 3 | 9 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
4,087 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/module.py
|
alignak.objects.module.Module
|
class Module(Item):
"""
Class to manage a module
"""
my_type = 'module'
my_name_property = "name"
properties = Item.properties.copy()
properties.update({
'name':
StringProp(default=u'unset'),
'type':
StringProp(default=u'unset'),
'daemon':
StringProp(default=u'unset'),
'python_name':
StringProp(),
'enabled':
BoolProp(default=True),
# Old "deprecated" property - replaced with name
'module_alias':
StringProp(),
# Old "deprecated" property - replaced with type
'module_types':
ListProp(default=[''], split_on_comma=True),
# Allow a module to be related some other modules
'modules':
ListProp(default=[''], split_on_comma=True),
# Module log level
'log_level':
StringProp(default=u'INFO'),
# Local statsd daemon for collecting daemon metrics
'statsd_host':
StringProp(default=u'localhost'),
'statsd_port':
IntegerProp(default=8125),
'statsd_prefix':
StringProp(default=u'alignak'),
'statsd_enabled':
BoolProp(default=False)
})
macros = {}
def __init__(self, params, parsing=True):
# Must be declared in this function rather than as class variable. This because the
# modules may have some properties that are not the same from one instance to another.
# Other objects very often have the same properties... but not the modules!
self.properties = Item.properties.copy()
self.properties.update({
'name':
StringProp(default=u'unset'),
'type':
StringProp(default=u'unset'),
'daemon':
StringProp(default=u'unset'),
'python_name':
StringProp(),
# Old "deprecated" property - replaced with name
'module_alias':
StringProp(),
# Old "deprecated" property - replaced with type
'module_types':
ListProp(default=[''], split_on_comma=True),
# Allow a module to be related some other modules
'modules':
ListProp(default=[''], split_on_comma=True),
'enabled':
BoolProp(default=True),
# Module log level
'log_level':
StringProp(default=u'INFO'),
# Local statsd daemon for collecting daemon metrics
'statsd_host':
StringProp(default=u'localhost'),
'statsd_port':
IntegerProp(default=8125),
'statsd_prefix':
StringProp(default=u'alignak'),
'statsd_enabled':
BoolProp(default=False)
})
# Manage the missing module name
if params and 'name' not in params:
if 'module_alias' in params:
params['name'] = params['module_alias']
else:
params['name'] = "Unnamed"
if params and 'module_alias' not in params:
if 'name' in params:
params['module_alias'] = params['name']
else:
params['module_alias'] = "Unnamed"
super(Module, self).__init__(params, parsing=parsing)
self.fill_default()
try:
self.modules = unserialize(self.modules, no_json=True)
except AlignakClassLookupException as exp: # pragma: no cover, simple protection
logger.error('Cannot un-serialize modules configuration '
'received from arbiter: %s', exp)
# # Remove extra Item base class properties...
# for prop in ['customs', 'plus', 'downtimes', 'old_properties',
# 'configuration_errors', 'configuration_warnings']:
# if getattr(self, prop, None):
# delattr(self, prop)
def __str__(self): # pragma: no cover
return '<Module %s, module: %s, type(s): %s />' % \
(self.get_name(), getattr(self, 'python_name', 'Unknown'),
getattr(self, 'type', 'Unknown'))
__repr__ = __str__
def get_types(self):
"""
Get types of the module
:return: Types of the module
:rtype: str
"""
return getattr(self, 'module_types', 'Untyped module')
def is_a_module(self, module_type):
"""
Is the module of the required type?
:param module_type: module type to check
:type: str
:return: True / False
"""
if hasattr(self, 'type'):
return module_type in self.type
return module_type in self.module_types
def serialize(self, no_json=True, printing=False):
"""A module may have some properties that are not defined in the class properties list.
Serializing a module is the same as serializing an Item but we also include all the
existing properties that are not defined in the properties or running_properties
class list.
We must also exclude the reference to the daemon that loaded the module!
"""
res = super(Module, self).serialize(no_json=no_json, printing=printing)
for prop in self.__dict__:
if prop in self.__class__.properties or \
prop in self.__class__.running_properties or \
prop in ['properties', 'old_properties', 'my_daemon']:
continue
if prop in ['modules'] and getattr(self, prop):
res[prop] = [m.serialize(no_json=no_json, printing=printing)
for m in self.modules]
else:
res[prop] = getattr(self, prop)
return res
|
class Module(Item):
'''
Class to manage a module
'''
def __init__(self, params, parsing=True):
pass
def __str__(self):
pass
def get_types(self):
'''
Get types of the module
:return: Types of the module
:rtype: str
'''
pass
def is_a_module(self, module_type):
'''
Is the module of the required type?
:param module_type: module type to check
:type: str
:return: True / False
'''
pass
def serialize(self, no_json=True, printing=False):
'''A module may have some properties that are not defined in the class properties list.
Serializing a module is the same as serializing an Item but we also include all the
existing properties that are not defined in the properties or running_properties
class list.
We must also exclude the reference to the daemon that loaded the module!
'''
pass
| 6 | 4 | 22 | 2 | 14 | 6 | 3 | 0.39 | 1 | 6 | 5 | 0 | 5 | 1 | 5 | 39 | 167 | 24 | 104 | 15 | 98 | 41 | 41 | 14 | 35 | 6 | 3 | 2 | 14 |
4,088 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/module.py
|
alignak.objects.module.Modules
|
class Modules(Items):
"""
Class to manage list of modules
Modules is used to group all Module
"""
inner_class = Module
def linkify(self):
"""Link a module to some other modules
:return: None
"""
self.linkify_module_by_module()
def linkify_module_by_module(self):
"""Link a module to some other modules
:return: None
"""
for module in self:
new_modules = []
for related in getattr(module, 'modules', []):
related = related.strip()
if not related:
continue
o_related = self.find_by_name(related)
if o_related is not None:
new_modules.append(o_related)
else:
self.add_error("the module '%s' for the module '%s' is unknown"
% (related, module.get_name()))
module.modules = new_modules
if module.modules:
logger.info("Module %s is linked to %s", module.get_name(), module.modules)
|
class Modules(Items):
'''
Class to manage list of modules
Modules is used to group all Module
'''
def linkify(self):
'''Link a module to some other modules
:return: None
'''
pass
def linkify_module_by_module(self):
'''Link a module to some other modules
:return: None
'''
pass
| 3 | 3 | 13 | 1 | 9 | 3 | 4 | 0.5 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 47 | 34 | 4 | 20 | 8 | 17 | 10 | 18 | 8 | 15 | 6 | 2 | 3 | 7 |
4,089 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/notificationway.py
|
alignak.objects.notificationway.NotificationWay
|
class NotificationWay(Item):
"""NotificationWay class is used to implement way of sending notifications (command, periods..)
"""
my_type = 'notificationway'
my_name_property = "%s_name" % my_type
properties = Item.properties.copy()
properties.update({
'notificationway_name':
StringProp(fill_brok=[FULL_STATUS]),
'host_notification_period':
StringProp(fill_brok=[FULL_STATUS]),
'service_notification_period':
StringProp(fill_brok=[FULL_STATUS]),
'host_notification_options':
ListProp(default=[], fill_brok=[FULL_STATUS], split_on_comma=True),
'service_notification_options':
ListProp(default=[], fill_brok=[FULL_STATUS], split_on_comma=True),
'host_notification_commands':
ListProp(default=[], fill_brok=[FULL_STATUS]),
'service_notification_commands':
ListProp(default=[], fill_brok=[FULL_STATUS]),
'min_business_impact':
IntegerProp(default=0, fill_brok=[FULL_STATUS]),
})
running_properties = Item.running_properties.copy()
# This tab is used to transform old parameters name into new ones
# so from Nagios2 format, to Nagios3 ones.
# Or Alignak deprecated names like criticity
old_properties = {
'min_criticity': 'min_business_impact',
}
macros = {}
special_properties = ('service_notification_commands', 'host_notification_commands',
'service_notification_period', 'host_notification_period')
def __init__(self, params, parsing=True):
for prop in ['service_notification_commands', 'host_notification_commands']:
if prop not in params or params[prop] is None:
continue
# if not parsing:
# # When deserialized, those are dict and we recreate the object
# print("nw: %s / %s" % (self, params[prop]))
# setattr(self, prop, [unserialize(elem) for elem in params[prop]])
# else:
# new_list = [(CommandCall(elem, parsing=parsing) if isinstance(elem, dict) else
# elem) for elem in params[prop]]
# # We recreate the object
# setattr(self, prop, new_list)
#
if not isinstance(params[prop], list):
params[prop] = [params[prop]]
setattr(self, prop, [(CommandCall(elem, parsing=parsing) if isinstance(elem, dict)
else elem) for elem in params[prop]])
# And remove prop, to prevent from being overridden
del params[prop]
super(NotificationWay, self).__init__(params, parsing=parsing)
@property
def host_notifications_enabled(self):
"""Notifications are enabled for the hosts
This is True if 'n' is not existing in the notification options array
:return: True if 'n' is not existing in the notification options array
:rtype: bool
"""
return 'n' not in getattr(self, 'host_notification_options', ['n'])
@property
def service_notifications_enabled(self):
"""Notifications are enabled for the services
This is True if 'n' is not existing in the notification options array
:return: True if 'n' is not existing in the notification options array
:rtype: bool
"""
return 'n' not in getattr(self, 'service_notification_options', ['n'])
def serialize(self, no_json=True, printing=False):
res = super(NotificationWay, self).serialize()
res['service_notification_commands'] = \
[elem.serialize(no_json=no_json, printing=printing)
for elem in getattr(self, 'service_notification_commands')]
res['host_notification_commands'] = \
[elem.serialize(no_json=no_json, printing=printing)
for elem in getattr(self, 'host_notification_commands')]
return res
def want_service_notification(self, timeperiods, timestamp, state, n_type, business_impact,
cmd=None):
# pylint: disable=too-many-return-statements
"""Check if notification options match the state of the service
Notification is NOT wanted in ONE of the following case::
* service notifications are disabled
* cmd is not in service_notification_commands
* business_impact < self.min_business_impact
* service_notification_period is not valid
* state does not match service_notification_options for problem, recovery and flapping
* state does not match host_notification_options for downtime
:param timeperiods: list of time periods
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if no condition is matched, otherwise False
:rtype: bool
"""
# If notification ways are not enabled for services
if not self.service_notifications_enabled:
return False
# Maybe the command we ask for is not for us, but for another notification ways
# on the same contact. If so, bail out
if cmd and cmd not in self.service_notification_commands:
return False
# If the business_impact is not high enough, we bail out
if business_impact < self.min_business_impact:
return False
notif_period = timeperiods[self.service_notification_period]
in_notification_period = notif_period.is_time_valid(timestamp)
if in_notification_period:
short_states = {
u'WARNING': 'w', u'UNKNOWN': 'u', u'CRITICAL': 'c', u'UNREACHABLE': 'x',
u'RECOVERY': 'r', u'FLAPPING': 'f', u'DOWNTIME': 's'
}
if n_type == u'PROBLEM' and state in short_states:
return short_states[state] in self.service_notification_options
if n_type == u'RECOVERY' and n_type in short_states:
return short_states[n_type] in self.service_notification_options
if n_type == u'ACKNOWLEDGEMENT':
return in_notification_period
if n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
return 'f' in self.service_notification_options
if n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'):
# No notification when a downtime was cancelled. Is that true??
# According to the documentation we need to look at _host_ options
return 's' in self.host_notification_options
return False
def want_host_notification(self, timeperiods, timestamp, state, n_type, business_impact,
cmd=None):
# pylint: disable=too-many-return-statements
"""Check if notification options match the state of the host
Notification is NOT wanted in ONE of the following case::
* host notifications are disabled
* cmd is not in host_notification_commands
* business_impact < self.min_business_impact
* host_notification_period is not valid
* state does not match host_notification_options for problem, recovery, flapping and dt
:param timeperiods: list of time periods
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if no condition is matched, otherwise False
:rtype: bool
"""
# If notification ways are not enabled for hosts
if not self.host_notifications_enabled:
return False
# If the business_impact is not high enough, we bail out
if business_impact < self.min_business_impact:
return False
# Maybe the command we ask for are not for us, but for another notification ways
# on the same contact. If so, bail out
if cmd and cmd not in self.host_notification_commands:
return False
notif_period = timeperiods[self.host_notification_period]
in_notification_period = notif_period.is_time_valid(timestamp)
if in_notification_period:
short_states = {
u'DOWN': 'd', u'UNREACHABLE': 'u', u'RECOVERY': 'r',
u'FLAPPING': 'f', u'DOWNTIME': 's'
}
if n_type == u'PROBLEM' and state in short_states:
return short_states[state] in self.host_notification_options
if n_type == u'RECOVERY' and n_type in short_states:
return short_states[n_type] in self.host_notification_options
if n_type == u'ACKNOWLEDGEMENT':
return in_notification_period
if n_type in (u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'):
return 'f' in self.host_notification_options
if n_type in (u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'):
return 's' in self.host_notification_options
return False
def get_notification_commands(self, o_type):
"""Get notification commands for object type
:param o_type: object type (host or service)
:type o_type: str
:return: command list
:rtype: list[alignak.objects.command.Command]
"""
return getattr(self, o_type + '_notification_commands', []) or []
def is_correct(self):
# pylint: disable=too-many-branches
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
# Internal checks before executing inherited function...
# Service part
if self.service_notifications_enabled:
if getattr(self, 'service_notification_commands', None) is None:
self.add_warning("do not have any service_notification_commands defined")
self.service_notification_commands = []
else:
for cmd in self.service_notification_commands:
if cmd is None:
self.add_error("a service_notification_command is missing")
elif not cmd.is_valid():
self.add_error("a service_notification_command is invalid (%s)" % cmd)
if getattr(self, 'service_notification_period', None) is None:
self.add_error("the service_notification_period is invalid")
# Now host part
if self.host_notifications_enabled:
if getattr(self, 'host_notification_commands', None) is None:
self.add_warning("do not have any host_notification_commands defined")
self.host_notification_commands = []
else:
for cmd in self.host_notification_commands:
if cmd is None:
self.add_error("a host_notification_command is missing")
elif not cmd.is_valid():
self.add_error("a host_notification_command is invalid (%s)" % cmd)
if getattr(self, 'host_notification_period', None) is None:
self.add_error("the host_notification_period is invalid")
return super(NotificationWay, self).is_correct() and self.conf_is_correct
|
class NotificationWay(Item):
'''NotificationWay class is used to implement way of sending notifications (command, periods..)
'''
def __init__(self, params, parsing=True):
pass
@property
def host_notifications_enabled(self):
'''Notifications are enabled for the hosts
This is True if 'n' is not existing in the notification options array
:return: True if 'n' is not existing in the notification options array
:rtype: bool
'''
pass
@property
def service_notifications_enabled(self):
'''Notifications are enabled for the services
This is True if 'n' is not existing in the notification options array
:return: True if 'n' is not existing in the notification options array
:rtype: bool
'''
pass
def serialize(self, no_json=True, printing=False):
pass
def want_service_notification(self, timeperiods, timestamp, state, n_type, business_impact,
cmd=None):
'''Check if notification options match the state of the service
Notification is NOT wanted in ONE of the following case::
* service notifications are disabled
* cmd is not in service_notification_commands
* business_impact < self.min_business_impact
* service_notification_period is not valid
* state does not match service_notification_options for problem, recovery and flapping
* state does not match host_notification_options for downtime
:param timeperiods: list of time periods
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if no condition is matched, otherwise False
:rtype: bool
'''
pass
def want_host_notification(self, timeperiods, timestamp, state, n_type, business_impact,
cmd=None):
'''Check if notification options match the state of the host
Notification is NOT wanted in ONE of the following case::
* host notifications are disabled
* cmd is not in host_notification_commands
* business_impact < self.min_business_impact
* host_notification_period is not valid
* state does not match host_notification_options for problem, recovery, flapping and dt
:param timeperiods: list of time periods
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param timestamp: time we want to notify the contact (usually now)
:type timestamp: int
:param state: host or service state ("WARNING", "CRITICAL" ..)
:type state: str
:param n_type: type of notification ("PROBLEM", "RECOVERY" ..)
:type n_type: str
:param business_impact: impact of this service
:type business_impact: int
:param cmd: command launched to notify the contact
:type cmd: str
:return: True if no condition is matched, otherwise False
:rtype: bool
'''
pass
def get_notification_commands(self, o_type):
'''Get notification commands for object type
:param o_type: object type (host or service)
:type o_type: str
:return: command list
:rtype: list[alignak.objects.command.Command]
'''
pass
def is_correct(self):
'''Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
'''
pass
| 11 | 7 | 28 | 4 | 13 | 12 | 5 | 0.74 | 1 | 4 | 1 | 0 | 8 | 2 | 8 | 42 | 277 | 44 | 134 | 31 | 121 | 99 | 95 | 27 | 86 | 13 | 3 | 4 | 42 |
4,090 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/itemgroup.py
|
alignak.objects.itemgroup.Itemgroup
|
class Itemgroup(Item):
    """Base class used to manage a group of items.

    An Itemgroup gathers several items (eg. Host, Service, ...) under a
    single named group.
    """
    members_property = "members"
    group_members_property = ""

    properties = Item.properties.copy()
    properties.update({
        'members':
            ListProp(default=[], fill_brok=[FULL_STATUS], split_on_comma=True)
    })

    running_properties = Item.running_properties.copy()
    running_properties.update({
        'unknown_members':
            ListProp(default=[]),
    })

    def __repr__(self):  # pragma: no cover
        current_members = getattr(self, 'members', None)
        if not current_members:
            return "<%s %s, no members/>" % (self.__class__.__name__, self.get_name())
        # Sort member names (or uuids) so that two dumps are easy to compare
        names = sorted(str(member.get_name() if isinstance(member, Item) else member)
                       for member in self)
        return "<%s %s, %d members: %s/>" \
               % (self.__class__.__name__, self.get_name(), len(self.members), names)
    __str__ = __repr__

    def __iter__(self):
        # Iterating the group iterates its members
        return iter(self.members)

    def __delitem__(self, i):
        # Removing a member that is not in the group is not an error
        try:
            self.members.remove(i)
        except ValueError:
            pass

    def copy_shell(self):
        """Clone this group, except for its members.

        The members of the copy are left empty and must be filled
        afterwards by the caller.

        :return: Itemgroup object
        :rtype: alignak.objects.itemgroup.Itemgroup
        :return: None
        """
        cls = self.__class__
        # A new, empty, group...
        shell = cls({})
        # ... that keeps the same identifier as the original one
        shell.uuid = self.uuid
        for prop in cls.properties:
            if not hasattr(self, prop):
                continue
            # Members are deliberately NOT copied
            # todo: check why a specific initialization is done!
            if prop in ('members', 'unknown_members'):
                setattr(shell, prop, [])
            else:
                setattr(shell, prop, getattr(self, prop))
        return shell

    def replace_members(self, members):
        """Set a brand new members list for this group.

        :param members: list of members
        :type members: list
        :return: None
        """
        self.members = members

    def get_members(self):
        """Return the members of the group (an empty list when unset).

        :return: list of members
        :rtype: list
        """
        return getattr(self, 'members', [])

    def add_members(self, members):
        """Append one (or several) member(s) to the members list.

        :param members: member name
        :type members: str
        :return: None
        """
        if not isinstance(members, list):
            members = [members]
        if getattr(self, 'members', None):
            self.members.extend(members)
        else:
            self.members = members

    def add_unknown_members(self, members):
        """Append one (or several) member(s) to the unknown members list.

        :param members: member name
        :type members: str
        :return: None
        """
        if not isinstance(members, list):
            members = [members]
        if hasattr(self, 'unknown_members'):
            self.unknown_members.extend(members)
        else:
            self.unknown_members = members

    def is_correct(self):
        """Check that this group is valid.

        A group is valid when all its declared members exist, thus when its
        unknown_members list is empty.

        :return: True if group is correct, otherwise False
        :rtype: bool
        """
        valid = True

        if self.members:
            # Remove duplicated members
            self.members = list(set(self.members))

        for member in self.unknown_members or []:
            self.add_error("as %s, got unknown member '%s'" % (self.__class__.my_type, member))
            valid = False

        return super(Itemgroup, self).is_correct() and valid

    def get_initial_status_brok(self, extra=None):
        """Build a brok with the group properties.

        `members` holds a list of uuids; when `extra` is an Items collection,
        the uuids are resolved so the brok carries (uuid, name) tuples with
        readable member names.

        :param extra: monitoring items, used to recover members
        :type extra: alignak.objects.item.Items
        :return: Brok object
        :rtype: object
        """
        if extra and isinstance(extra, Items):
            resolved = [(extra[member_id].uuid, extra[member_id].get_name())
                        for member_id in self.members]
            extra = {'members': resolved}
        return super(Itemgroup, self).get_initial_status_brok(extra=extra)
|
class Itemgroup(Item):
'''
Class to manage a group of items
An Itemgroup is used to group items (eg. Host, Service,...)
'''
def __repr__(self):
pass
def __iter__(self):
pass
def __delitem__(self, i):
pass
def copy_shell(self):
'''
Copy the group properties EXCEPT the members.
Members need to be filled after manually
:return: Itemgroup object
:rtype: alignak.objects.itemgroup.Itemgroup
:return: None
'''
pass
def replace_members(self, members):
'''
Replace members of itemgroup by new members list
:param members: list of members
:type members: list
:return: None
'''
pass
def get_members(self):
'''Get the members of the group
:return: list of members
:rtype: list
'''
pass
def add_members(self, members):
'''Add a new member to the members list
:param members: member name
:type members: str
:return: None
'''
pass
def add_unknown_members(self, members):
'''Add a new member to the unknown members list
:param members: member name
:type members: str
:return: None
'''
pass
def is_correct(self):
'''
Check if a group is valid.
This means that all the declared group members exist, thus the list of unknown_members
must be empty
:return: True if group is correct, otherwise False
:rtype: bool
'''
pass
def get_initial_status_brok(self, extra=None):
'''
Get a brok with the group properties
`members` contains a list of uuid which we must provide the names. Thus we will replace
the default provided uuid with the members short name. The `extra` parameter, if present,
is containing the Items to search for...
:param extra: monitoring items, used to recover members
:type extra: alignak.objects.item.Items
:return:Brok object
:rtype: object
'''
pass
| 11 | 8 | 13 | 2 | 6 | 5 | 3 | 0.75 | 1 | 6 | 1 | 4 | 10 | 2 | 10 | 44 | 157 | 28 | 75 | 27 | 64 | 56 | 64 | 27 | 53 | 4 | 3 | 2 | 25 |
4,091 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/notificationway.py
|
alignak.objects.notificationway.NotificationWays
|
class NotificationWays(CommandCallItems):
    """Collection of NotificationWay objects, used when parsing the configuration"""
    name_property = "notificationway_name"
    inner_class = NotificationWay

    def linkify(self, timeperiods, commands):
        """Replace raw references with links to the real objects::

        * notificationways -> timeperiods
        * notificationways -> commands

        :param timeperiods: timeperiods to link
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param commands: commands to link
        :type commands: alignak.objects.command.Commands
        :return: None
        """
        # Both the service and the host notification periods/commands get linked
        for period_property in ('service_notification_period', 'host_notification_period'):
            self.linkify_with_timeperiods(timeperiods, period_property)
        for command_property in ('service_notification_commands', 'host_notification_commands'):
            self.linkify_with_commands(commands, command_property, is_a_list=True)
|
class NotificationWays(CommandCallItems):
'''NotificationWays manage a list of NotificationWay objects, used for parsing configuration
'''
def linkify(self, timeperiods, commands):
'''Create link between objects::
* notificationways -> timeperiods
* notificationways -> commands
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param commands: commands to link
:type commands: alignak.objects.command.Commands
:return: None
'''
pass
| 2 | 2 | 16 | 2 | 5 | 9 | 1 | 1.38 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 48 | 23 | 4 | 8 | 4 | 6 | 11 | 8 | 4 | 6 | 1 | 3 | 0 | 1 |
4,092 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_realms.py
|
tests.test_realms.TestRealms
|
class TestRealms(AlignakTest):
"""
This class test realms usage
"""
    def setUp(self):
        """Prepare the Alignak test environment and raise the unit tests logger
        verbosity to INFO so that realm-related log messages can be asserted.
        """
        super(TestRealms, self).setUp()
        self.set_unit_tests_logger_level('INFO')
def test_no_defined_realm(self):
""" Test configuration with no defined realm
Load a configuration with no realm defined:
- Alignak defines a default realm
- All hosts with no realm defined are in this default realm
:return: None
"""
self.setup_with_file('cfg/realms/no_defined_realms.cfg', 'cfg/realms/no_defined_realms.ini')
assert self.conf_is_correct
self.show_logs()
self.assert_any_log_match(re.escape("No realms defined, I am adding one as All"))
self.assert_any_log_match(re.escape("No reactionner defined, I am adding one on 127.0.0.1:10000"))
self.assert_any_log_match(re.escape("No poller defined, I am adding one on 127.0.0.1:10001"))
self.assert_any_log_match(re.escape("No broker defined, I am adding one on 127.0.0.1:10002"))
self.assert_any_log_match(re.escape("No receiver defined, I am adding one on 127.0.0.1:10003"))
# Only one realm in the configuration
assert len(self._arbiter.conf.realms) == 1
# All realm exists
realm = self._arbiter.conf.realms.find_by_name("All")
assert realm is not None
assert realm.realm_name == 'All'
assert realm.alias == 'Self created default realm'
assert realm.default
# All realm is the default realm
default_realm = self._arbiter.conf.realms.get_default()
assert realm == default_realm
# Default realm does not exist anymore
realm = self._arbiter.conf.realms.find_by_name("Default")
assert realm is None
# Hosts without realm definition are in the Default realm
hosts = self._arbiter.conf.hosts
assert len(hosts) == 2
for host in hosts:
assert host.realm == default_realm.uuid
assert host.realm_name == default_realm.get_name()
def test_default_realm(self):
""" Test configuration with no defined realm
Load a configuration with no realm defined:
- Alignak defines a default realm
- All hosts with no realm defined are in this default realm
:return: None
"""
self.setup_with_file('cfg/realms/no_defined_realms.cfg', 'cfg/realms/no_default_realm.ini')
assert self.conf_is_correct
self.show_logs()
self.assert_any_log_match(re.escape("No reactionner defined, I am adding one on 127.0.0.1:10000"))
self.assert_any_log_match(re.escape("No poller defined, I am adding one on 127.0.0.1:10001"))
self.assert_any_log_match(re.escape("No broker defined, I am adding one on 127.0.0.1:10002"))
self.assert_any_log_match(re.escape("No receiver defined, I am adding one on 127.0.0.1:10003"))
# Only one realm in the configuration
assert len(self._arbiter.conf.realms) == 1
# All realm exists
realm = self._arbiter.conf.realms.find_by_name("All")
assert realm is not None
assert realm.realm_name == 'All'
assert realm.alias == 'Self created default realm'
assert realm.default
# All realm is the default realm
default_realm = self._arbiter.conf.realms.get_default()
assert realm == default_realm
# Default realm does not exist anymore
realm = self._arbiter.conf.realms.find_by_name("Default")
assert realm is None
# Hosts without realm definition are in the Default realm
hosts = self._arbiter.conf.hosts
assert len(hosts) == 2
for host in hosts:
assert host.realm == default_realm.uuid
assert host.realm_name == default_realm.get_name()
def test_no_defined_daemons(self):
""" Test configuration with no defined daemons
Load a configuration with no realm nor daemons defined:
- Alignak defines a default realm
- All hosts with no realm defined are in this default realm
- Alignak defines default daemons
:return: None
"""
self.setup_with_file('cfg/realms/no_defined_daemons.cfg',
'cfg/realms/no_defined_daemons.ini', verbose=True)
assert self.conf_is_correct
self.show_logs()
self.assert_any_log_match(re.escape("No scheduler defined, I am adding one on 127.0.0.1:10000"))
self.assert_any_log_match(re.escape("No reactionner defined, I am adding one on 127.0.0.1:10001"))
self.assert_any_log_match(re.escape("No poller defined, I am adding one on 127.0.0.1:10002"))
self.assert_any_log_match(re.escape("No broker defined, I am adding one on 127.0.0.1:10003"))
self.assert_any_log_match(re.escape("No receiver defined, I am adding one on 127.0.0.1:10004"))
# self.assert_any_log_match(re.escape("Tagging Default-Poller with realm All"))
# self.assert_any_log_match(re.escape("Tagging Default-Broker with realm All"))
# self.assert_any_log_match(re.escape("Tagging Default-Reactionner with realm All"))
# self.assert_any_log_match(re.escape("Tagging Default-Scheduler with realm All"))
# self.assert_any_log_match(re.escape("Prepare dispatching for this realm"))
scheduler_link = self._arbiter.conf.schedulers.find_by_name('Default-Scheduler')
assert scheduler_link is not None
# # Scheduler configuration is ok
# assert self._schedulers['Default-Scheduler'].pushed_conf.conf_is_correct
# Broker, Poller, Reactionner named as in the configuration
link = self._arbiter.conf.brokers.find_by_name('Default-Broker')
assert link is not None
link = self._arbiter.conf.pollers.find_by_name('Default-Poller')
assert link is not None
link = self._arbiter.conf.reactionners.find_by_name('Default-Reactionner')
assert link is not None
# Receiver - a default receiver got created
assert self._arbiter.conf.receivers
# link = self._arbiter.conf.receivers.find_by_name('Default-Receiver')
# assert link is not None
# Only one realm in the configuration
assert len(self._arbiter.conf.realms) == 1
# 'All' realm exists
realm = self._arbiter.conf.realms.find_by_name("All")
assert realm is not None
assert realm.realm_name == 'All'
assert realm.alias == ''
assert realm.default
# 'All' realm is the default realm
default_realm = self._arbiter.conf.realms.get_default()
assert realm == default_realm
# Default realm does not exist anymore
realm = self._arbiter.conf.realms.find_by_name("Default")
assert realm is None
# Hosts without realm definition are in the Default realm
hosts = self._arbiter.conf.hosts
assert len(hosts) == 4
for host in hosts:
assert host.realm == default_realm.uuid
assert host.realm_name == default_realm.get_name()
    def test_no_scheduler_in_realm(self):
        """ Test missing scheduler in realm

        A realm is defined but no scheduler, nor broker, nor poller exist for this realm.
        Loading this configuration must fail (SystemExit) and the configuration must
        be flagged as incorrect.

        :return: None
        """
        # The arbiter exits when the realm has no satellite at all
        with pytest.raises(SystemExit):
            self.setup_with_file('cfg/realms/no_scheduler_in_realm.cfg')
        self.show_logs()
        assert not self.conf_is_correct
def test_no_scheduler_in_realm_self_add(self):
""" Test missing scheduler in realm, self add a scheduler
A realm is defined but no scheduler, nor broker, nor poller exist for this realm
:return: None
"""
self.setup_with_file('cfg/realms/no_scheduler_in_realm_self_add.cfg')
self.show_logs()
assert self.conf_is_correct
self.assert_any_log_match(re.escape("Adding a scheduler for the realm: Distant"))
self.assert_any_log_match(re.escape("Adding a poller for the realm: Distant"))
# self.assert_any_log_match(re.escape("Adding a broker for the realm: Distant"))
self.assert_any_log_match(re.escape("Adding a reactionner for the realm: Distant"))
self.assert_any_log_match(re.escape("Adding a receiver for the realm: Distant"))
self.assert_any_log_match(re.escape("Realm All: (in/potential) (schedulers:1/0) "
"(pollers:1/0) (reactionners:1/0) (brokers:1/0) "
"(receivers:1/0)"))
self.assert_any_log_match(re.escape("Realm Distant: (in/potential) (schedulers:1/0) "
"(pollers:1/0) (reactionners:1/0) (brokers:1/0) "
"(receivers:1/0)"))
assert "[config::Alignak global configuration] Some hosts exist in the realm 'Distant' " \
"but no scheduler is defined for this realm." in self.configuration_warnings
assert "[config::Alignak global configuration] Some hosts exist in the realm 'Distant' " \
"but no reactionner is defined for this realm." in self.configuration_warnings
assert "[config::Alignak global configuration] Some hosts exist in the realm 'Distant' " \
"but no receiver is defined for this realm." in self.configuration_warnings
assert "[config::Alignak global configuration] Some hosts exist in the realm 'Distant' " \
"but no scheduler is defined for this realm." in self.configuration_warnings
# Scheduler added for the realm
for link in self._arbiter.conf.schedulers:
print("Arbiter scheduler: %s" % link)
if link.name == 'scheduler-Distant':
break
else:
assert False
# Broker added for the realm
for link in self._arbiter.conf.brokers:
print("Arbiter broker: %s" % link)
if link.name == 'Broker-distant':
break
else:
assert False
# Poller added for the realm
for link in self._arbiter.conf.pollers:
if link.name == 'poller-Distant':
break
else:
assert False
# Reactionner added for the realm
for link in self._arbiter.conf.reactionners:
if link.name == 'reactionner-Distant':
break
else:
assert False
# Receiver added for the realm
for link in self._arbiter.conf.receivers:
if link.name == 'receiver-Distant':
break
else:
assert False
def test_no_broker_in_realm(self):
""" Test missing broker in realm
Test realms on each host
:return: None
"""
self.setup_with_file('cfg/realms/no_broker_in_realm.cfg', 'cfg/realms/no_broker_in_realm.ini')
self.show_logs()
assert self.conf_is_correct
dist = self._arbiter.conf.realms.find_by_name("Distant")
assert dist is not None
sched = self._arbiter.conf.schedulers.find_by_name("scheduler-distant")
assert sched is not None
assert 0 == len(self._arbiter.conf.realms[sched.realm].potential_brokers)
assert 0 == len(self._arbiter.conf.realms[sched.realm].potential_pollers)
assert 0 == len(self._arbiter.conf.realms[sched.realm].potential_reactionners)
assert 0 == len(self._arbiter.conf.realms[sched.realm].potential_receivers)
def test_realm_host_assignation(self):
""" Test host realm assignation
Test realms on each host
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/realms/several_realms.cfg', 'cfg/realms/several_realms.ini')
self.show_logs()
assert not self.conf_is_correct
self.assert_any_cfg_log_match(re.escape(
"[hostgroup::in_realm2] Configuration is incorrect; "
))
self.assert_any_cfg_log_match(re.escape(
"[hostgroup::in_realm2] host test_host3_hg_realm2 (realm: realm1) is not in "
"the same realm than its hostgroup in_realm2 (realm: realm2)"
))
# self.assert_any_cfg_log_match(re.escape(
# "hostgroup in_realm2 got the default realm but it has some hosts that are from different realms"
# ))
# Some error messages
assert len(self.configuration_errors) == 3
realm1 = self._arbiter.conf.realms.find_by_name('realm1')
assert realm1 is not None
realm2 = self._arbiter.conf.realms.find_by_name('realm2')
assert realm2 is not None
host = self._arbiter.conf.hosts.find_by_name('test_host_realm1')
assert realm1.uuid == host.realm
host = self._arbiter.conf.hosts.find_by_name('test_host_realm2')
assert realm2.uuid == host.realm
def test_undefined_used_realm(self):
""" Test undefined realm used in daemons
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/realms/use_undefined_realm.cfg')
self.show_logs()
assert not self.conf_is_correct
self.assert_any_cfg_log_match(re.escape(
"The scheduler 'Scheduler-distant' is affected to an unknown realm: 'Distant'"
))
self.assert_any_cfg_log_match(re.escape(
"The host 'bad_host' is affected to an unknown realm: 'Distant'"
))
def test_realm_hostgroup_assignation(self):
""" Test realm hostgroup assignation
Check realm and hostgroup
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/realms/several_realms.cfg', 'cfg/realms/several_realms.ini')
self.show_logs()
assert not self.conf_is_correct
self.assert_any_cfg_log_match(re.escape(
"[hostgroup::in_realm2] Configuration is incorrect; "
))
self.assert_any_cfg_log_match(re.escape(
"[hostgroup::in_realm2] host test_host3_hg_realm2 (realm: realm1) is not "
"in the same realm than its hostgroup in_realm2 (realm: realm2)"
))
# self.assert_any_cfg_log_match(re.escape(
# "hostgroup in_realm2 got the default realm but it has some hosts that are from different realms: None and realm1. The realm cannot be adjusted!"
# ))
# Some error messages
assert len(self.configuration_errors) == 3
# Check all daemons exist
assert len(self._arbiter.conf.arbiters) == 1
assert len(self._arbiter.conf.schedulers) == 2
assert len(self._arbiter.conf.brokers) == 2
assert len(self._arbiter.conf.pollers) == 2
assert len(self._arbiter.conf.reactionners) == 1
assert len(self._arbiter.conf.receivers) == 1
for daemon in self._arbiter.conf.schedulers:
assert daemon.get_name() in ['Scheduler-1', 'Scheduler-2']
assert daemon.realm in self._arbiter.conf.realms
for daemon in self._arbiter.conf.brokers:
assert daemon.get_name() in ['Broker-1', 'Broker-2']
assert daemon.realm in self._arbiter.conf.realms
for daemon in self._arbiter.conf.pollers:
assert daemon.get_name() in ['Poller-1', 'Poller-2']
assert daemon.realm in self._arbiter.conf.realms
for daemon in self._arbiter.conf.receivers:
assert daemon.get_name() in ['receiver-master']
assert daemon.realm in self._arbiter.conf.realms
# Hostgroup in_realm2
in_realm2 = self._arbiter.conf.hostgroups.find_by_name('in_realm2')
# Realms
realm1 = self._arbiter.conf.realms.find_by_name('realm1')
assert realm1 is not None
realm2 = self._arbiter.conf.realms.find_by_name('realm2')
assert realm2 is not None
host = self._arbiter.conf.hosts.find_by_name('test_host_realm1')
assert realm1.uuid == host.realm
host = self._arbiter.conf.hosts.find_by_name('test_host_realm2')
assert realm2.uuid == host.realm
# test_host1 and test_host2 are linked to realm2 because they are in the hostgroup in_realm2
test_host1_hg_realm2 = self._arbiter.conf.hosts.find_by_name("test_host1_hg_realm2")
assert test_host1_hg_realm2 is not None
assert realm2.uuid == test_host1_hg_realm2.realm
assert in_realm2.get_name() in [self._arbiter.conf.hostgroups[hg].get_name() for hg in test_host1_hg_realm2.hostgroups]
test_host2_hg_realm2 = self._arbiter.conf.hosts.find_by_name("test_host2_hg_realm2")
assert test_host2_hg_realm2 is not None
assert realm2.uuid == test_host2_hg_realm2.realm
assert in_realm2.get_name() in [self._arbiter.conf.hostgroups[hg].get_name() for hg in test_host2_hg_realm2.hostgroups]
# test_host3 is linked to realm1 but its hostgroup in realm2!
test_host3_hg_realm2 = self._arbiter.conf.hosts.find_by_name("test_host3_hg_realm2")
assert test_host3_hg_realm2 is not None
assert realm1.uuid == test_host3_hg_realm2.realm
assert in_realm2.get_name() in [self._arbiter.conf.hostgroups[hg].get_name() for hg in test_host3_hg_realm2.hostgroups]
def test_sub_realms(self):
""" Test realm / sub-realm
All main daemons are in the realm World and manage the sub-realms except for the poller!
A second broker exist in the realm World and a receiver exist in the realm Paris
:return: None
"""
self.setup_with_file('cfg/realms/sub_realms.cfg', 'cfg/realms/sub_realms.ini',
verbose=False, dispatching=True)
assert self.conf_is_correct
print("Realms: %s" % self._arbiter.conf.realms)
world = self._arbiter.conf.realms.find_by_name('World')
print(world)
assert world is not None
europe = self._arbiter.conf.realms.find_by_name('Europe')
assert europe is not None
paris = self._arbiter.conf.realms.find_by_name('Paris')
assert paris is not None
# Get satellites of the World realm
assert len(world.get_satellites_by_type('arbiter')) == 0
satellites = world.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "arbiter")
assert len(satellites) == 0
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(world.get_satellites_by_type('scheduler')) == 1
satellites = world.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "scheduler")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(world.get_satellites_by_type('broker')) == 2
satellites = world.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "broker")
assert len(satellites) == 2
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(world.get_satellites_by_type('poller')) == 1
satellites = world.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "poller")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(world.get_satellites_by_type('receiver')) == 1
satellites = world.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "receiver")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(world.get_satellites_by_type('reactionner')) == 1
satellites = world.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "reactionner")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
# Get satellites of the Europe realm
assert europe.uuid in world.all_sub_members
assert len(europe.get_satellites_by_type('arbiter')) == 0
satellites = europe.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "arbiter")
assert len(satellites) == 0
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(europe.get_satellites_by_type('scheduler')) == 1
satellites = europe.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "scheduler")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(europe.get_satellites_by_type('broker')) == 0
satellites = europe.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "broker")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(europe.get_satellites_by_type('poller')) == 0
satellites = europe.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "poller")
assert len(satellites) == 0 # Because the master poller is not managing sub-realms! Else it should be 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(europe.get_satellites_by_type('receiver')) == 0
satellites = europe.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "receiver")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(europe.get_satellites_by_type('reactionner')) == 0
satellites = europe.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "reactionner")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
# Get satellites of the Paris realm
assert paris.uuid in europe.all_sub_members
assert len(paris.get_satellites_by_type('arbiter')) == 0
satellites = paris.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "arbiter")
assert len(satellites) == 0
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(paris.get_satellites_by_type('scheduler')) == 1
satellites = paris.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "scheduler")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(paris.get_satellites_by_type('broker')) == 0
satellites = paris.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "broker")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(paris.get_satellites_by_type('poller')) == 0
satellites = paris.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "poller")
assert len(satellites) == 0 # Because the master poller is not managing sub-realms! Else it should be 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(paris.get_satellites_by_type('receiver')) == 1
satellites = paris.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "receiver")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
assert len(paris.get_satellites_by_type('reactionner')) == 0
satellites = paris.get_potential_satellites_by_type(self._arbiter.dispatcher.all_daemons_links, "reactionner")
assert len(satellites) == 1
for sat_link in satellites:
print("%s / %s" % (sat_link.type, sat_link.name))
def test_sub_realms_assignations(self):
""" Test realm / sub-realm assignation
Realms:
World (default)
-> Europe
-> Paris
Satellites:
arbiter-master, manage_sub_realms=1
scheduler-master, manage_sub_realms=1
poller-master, manage_sub_realms=0
reactionner-master, manage_sub_realms=1
broker-master, manage_sub_realms=0
broker-B-world, manage_sub_realms=1
receiver-master, manage_sub_realms=1
One sub-realm brokers for the realm World : ok
One "not sub-realm" broker for the default realm, should not disturb !
On "not sub-realm" poller for the default realm, I should not have any poller
for the sub realms !
:return: None
"""
self.setup_with_file('cfg/realms/sub_realms.cfg', 'cfg/realms/sub_realms.ini',
verbose=False)
assert self.conf_is_correct
print("Realms: \n%s" % self._arbiter.conf.realms)
world = self._arbiter.conf.realms.find_by_name('World')
assert world is not None
europe = self._arbiter.conf.realms.find_by_name('Europe')
assert europe is not None
paris = self._arbiter.conf.realms.find_by_name('Paris')
assert paris is not None
# Get the B-world broker
# This broker is defined in the realm World and it manages sub-realms
bworld = self._arbiter.conf.brokers.find_by_name('broker-b')
assert bworld is not None
# broker should be in the world level
assert (bworld.uuid in world.brokers) is True
# in europe too
assert (bworld.uuid in europe.potential_brokers) is True
# and in paris too
assert (bworld.uuid in paris.potential_brokers) is True
# Get the master broker
# This broker is defined in the realm World and it does not manage sub-realms!
bmaster = self._arbiter.conf.brokers.find_by_name('broker-master')
assert bmaster is not None
# broker should be in the world level
assert (bmaster.uuid in world.brokers) is True
# but not in Europe !
assert (bmaster.uuid not in europe.potential_brokers) is True
# nor in paris!
assert (bmaster.uuid not in paris.potential_brokers) is True
# Get the master poller
# This poller is defined in the realm World and it does not manage sub-realms
sat = self._arbiter.conf.pollers.find_by_name('poller-master')
assert sat is not None
# poller should be in the world level
assert (sat.uuid in world.pollers) is True
# but not in Europe !
assert (sat.uuid not in europe.potential_pollers) is True
# nor in paris!
assert (sat.uuid not in paris.potential_pollers) is True
# Get the scheduler master that should be in all realms
sat = self._arbiter.conf.schedulers.find_by_name('scheduler-master')
assert (sat.uuid in world.schedulers) is True
assert (sat.uuid in europe.potential_schedulers) is True
assert (sat.uuid in paris.potential_schedulers) is True
# Get the reactionner master that should be in all realms
sat = self._arbiter.conf.reactionners.find_by_name('reactionner-master')
assert (sat.uuid in world.reactionners) is True
assert (sat.uuid in europe.potential_reactionners) is True
assert (sat.uuid in paris.potential_reactionners) is True
# Get the receiver master that should be in all realms
sat = self._arbiter.conf.receivers.find_by_name('receiver-master')
assert (sat.uuid in world.receivers) is True
assert (sat.uuid in europe.potential_receivers) is True
assert (sat.uuid in paris.potential_receivers) is True
def test_sub_realms_multi_levels(self):
""" Test realm / sub-realm / sub-sub-realms...
Realms:
World (default)
+ Asia
++ Japan
+++ Tokyo
+++ Osaka
+ Europe
++ Italy
+++ Torino
+++ Roma
++ France
+++ Paris
+++ Lyon
World2 (also references Asia as a sub-realm)
Satellites (declared):
arbiter-master, manage_sub_realms=1
scheduler-master, manage_sub_realms=1
poller-master, manage_sub_realms=1
reactionner-master, manage_sub_realms=1
broker-master, manage_sub_realms=1
receiver-master, manage_sub_realms=1
broker-france, manage_sub_realms=0 -> realm France
scheduler-france, manage_sub_realms=0 -> realm France
TODO: this test raises some error logs because of missing schedulers but the
configuration is accepted and looks correct! Note that this configuration is
a bit complicated and ambiguous!!!
:return: None
"""
self.setup_with_file('cfg/realms/sub_realms_multi_levels.cfg',
'cfg/realms/sub_realms_multi_levels.ini',
verbose=False)
assert self.conf_is_correct
self.show_logs()
print("Realms: \n%s " % self._arbiter.conf.realms)
Osaka = self._arbiter.conf.realms.find_by_name('Osaka')
assert Osaka is not None
Tokyo = self._arbiter.conf.realms.find_by_name('Tokyo')
assert Tokyo is not None
Japan = self._arbiter.conf.realms.find_by_name('Japan')
assert Japan is not None
Asia = self._arbiter.conf.realms.find_by_name('Asia')
assert Asia is not None
Torino = self._arbiter.conf.realms.find_by_name('Torino')
assert Torino is not None
Roma = self._arbiter.conf.realms.find_by_name('Roma')
assert Roma is not None
Italy = self._arbiter.conf.realms.find_by_name('Italy')
assert Italy is not None
Lyon = self._arbiter.conf.realms.find_by_name('Lyon')
assert Lyon is not None
Paris = self._arbiter.conf.realms.find_by_name('Paris')
assert Paris is not None
France = self._arbiter.conf.realms.find_by_name('France')
assert France is not None
Europe = self._arbiter.conf.realms.find_by_name('Europe')
assert Europe is not None
World = self._arbiter.conf.realms.find_by_name('World')
assert World is not None
# Check members for each realm - members list is an ordered list!
print(("The World: %s" % (World)))
assert World.realm_members != [Europe.get_name(), Asia.get_name()]
assert World.realm_members == [Asia.get_name(), Europe.get_name()]
print(("Asia: %s" % (Asia)))
assert Asia.realm_members == [Japan.get_name()]
assert Japan.realm_members != [Tokyo.get_name(), Osaka.get_name()]
assert Japan.realm_members == [Osaka.get_name(), Tokyo.get_name()]
print(("Europe: %s" % (Europe)))
assert Europe.realm_members == [France.get_name(), Italy.get_name()]
assert Italy.realm_members == [Roma.get_name(), Torino.get_name()]
assert France.realm_members == [Lyon.get_name(), Paris.get_name()]
# Check all_sub_members for each realm - ordered lists!
assert Lyon.all_sub_members == []
assert Paris.all_sub_members == []
assert France.all_sub_members == [Lyon.uuid, Paris.uuid]
assert Torino.all_sub_members == []
assert Roma.all_sub_members == []
assert Italy.all_sub_members == [Roma.uuid, Torino.uuid]
assert Osaka.all_sub_members == []
assert Tokyo.all_sub_members == []
assert Japan.all_sub_members == [Osaka.uuid, Tokyo.uuid]
assert Asia.all_sub_members == [Japan.uuid, Osaka.uuid, Tokyo.uuid]
assert Europe.all_sub_members == [France.uuid, Lyon.uuid, Paris.uuid,
Italy.uuid, Roma.uuid, Torino.uuid]
assert World.all_sub_members_names == [
'Asia',
'Japan', 'Osaka', 'Tokyo',
'Europe',
'France', 'Lyon', 'Paris',
'Italy', 'Roma', 'Torino']
assert World.all_sub_members == [
Asia.uuid,
Japan.uuid, Osaka.uuid, Tokyo.uuid,
Europe.uuid,
France.uuid, Lyon.uuid, Paris.uuid,
Italy.uuid, Roma.uuid, Torino.uuid]
# Check satellites defined in each realms
poller_uuid = self._arbiter.conf.pollers.find_by_name('poller-master').uuid
receiver_uuid = self._arbiter.conf.receivers.find_by_name('receiver-master').uuid
reactionner_uuid = self._arbiter.conf.reactionners.find_by_name('reactionner-master').uuid
scheduler_uuid = self._arbiter.conf.schedulers.find_by_name('scheduler-master').uuid
broker_uuid = self._arbiter.conf.brokers.find_by_name('broker-master').uuid
# Specific France realm satellites
scheduler_france1_uuid = self._arbiter.conf.schedulers.find_by_name('scheduler-france1').uuid
scheduler_france2_uuid = self._arbiter.conf.schedulers.find_by_name('scheduler-france2').uuid
broker_france_uuid = self._arbiter.conf.brokers.find_by_name('broker-france').uuid
for broker in self._arbiter.conf.brokers:
print("Broker: %s" % (broker))
# World has some satellites
for realm in [World]:
assert realm.pollers == [poller_uuid]
assert realm.receivers == [receiver_uuid]
assert realm.reactionners == [reactionner_uuid]
assert realm.schedulers == [scheduler_uuid]
assert realm.brokers == [broker_uuid]
assert realm.potential_brokers == []
assert realm.potential_schedulers == []
assert realm.potential_pollers == []
assert realm.potential_receivers == []
assert realm.potential_reactionners == []
# These realms have some potential satellites but no direct ones
for realm in [Europe, Italy, Roma, Torino, Asia, Japan, Osaka, Tokyo]:
assert realm.brokers == []
assert realm.schedulers == []
assert realm.pollers == []
assert realm.receivers == []
assert realm.reactionners == []
assert realm.potential_pollers == [poller_uuid]
assert realm.potential_receivers == [receiver_uuid]
assert realm.potential_reactionners == [reactionner_uuid]
assert realm.potential_schedulers == [scheduler_uuid]
assert realm.potential_brokers == [broker_uuid]
# France has some direct satellites
for realm in [France]:
assert realm.brokers == [broker_france_uuid]
assert scheduler_france1_uuid in realm.schedulers
assert scheduler_france2_uuid in realm.schedulers
assert len(realm.schedulers) == 2
assert realm.pollers == []
assert realm.receivers == []
assert realm.reactionners == []
assert realm.potential_pollers == [poller_uuid]
assert realm.potential_receivers == [receiver_uuid]
assert realm.potential_reactionners == [reactionner_uuid]
assert realm.potential_schedulers == [scheduler_uuid]
assert realm.potential_brokers == [broker_uuid]
# France sub-realms have some potential satellites
for realm in [Paris, Lyon]:
assert realm.brokers == []
assert realm.schedulers == []
assert realm.pollers == []
assert realm.receivers == []
assert realm.reactionners == []
assert realm.potential_pollers == [poller_uuid]
assert realm.potential_receivers == [receiver_uuid]
assert realm.potential_reactionners == [reactionner_uuid]
assert scheduler_uuid in realm.potential_schedulers
assert scheduler_france2_uuid in realm.potential_schedulers
assert scheduler_france2_uuid in realm.potential_schedulers
assert len(realm.potential_schedulers) == 3
assert broker_uuid in realm.potential_brokers
# assert broker_france_uuid in realm.potential_brokers
assert len(realm.potential_brokers) == 1
def test_sub_realms_multi_levels_loop(self):
""" Test realm / sub-realm / sub-sub-realms... with a loop, so exit with error message
:return: None
"""
with pytest.raises(SystemExit):
self.setup_with_file('cfg/cfg_realms_sub_multi_levels_loop.cfg')
assert not self.conf_is_correct
self.show_configuration_logs()
|
class TestRealms(AlignakTest):
'''
This class test realms usage
'''
def setUp(self):
pass
def test_no_defined_realm(self):
''' Test configuration with no defined realm
Load a configuration with no realm defined:
- Alignak defines a default realm
- All hosts with no realm defined are in this default realm
:return: None
'''
pass
def test_default_realm(self):
''' Test configuration with no defined realm
Load a configuration with no realm defined:
- Alignak defines a default realm
- All hosts with no realm defined are in this default realm
:return: None
'''
pass
def test_no_defined_daemons(self):
''' Test configuration with no defined daemons
Load a configuration with no realm nor daemons defined:
- Alignak defines a default realm
- All hosts with no realm defined are in this default realm
- Alignak defines default daemons
:return: None
'''
pass
def test_no_scheduler_in_realm(self):
''' Test missing scheduler in realm
A realm is defined but no scheduler, nor broker, nor poller exist for this realm
Configuration is not correct
:return: None
'''
pass
def test_no_scheduler_in_realm_self_add(self):
''' Test missing scheduler in realm, self add a scheduler
A realm is defined but no scheduler, nor broker, nor poller exist for this realm
:return: None
'''
pass
def test_no_broker_in_realm(self):
''' Test missing broker in realm
Test realms on each host
:return: None
'''
pass
def test_realm_host_assignation(self):
''' Test host realm assignation
Test realms on each host
:return: None
'''
pass
def test_undefined_used_realm(self):
''' Test undefined realm used in daemons
:return: None
'''
pass
def test_realm_hostgroup_assignation(self):
''' Test realm hostgroup assignation
Check realm and hostgroup
:return: None
'''
pass
def test_sub_realms(self):
''' Test realm / sub-realm
All main daemons are in the realm World and manage the sub-realms except for the poller!
A second broker exist in the realm World and a receiver exist in the realm Paris
:return: None
'''
pass
def test_sub_realms_assignations(self):
''' Test realm / sub-realm assignation
Realms:
World (default)
-> Europe
-> Paris
Satellites:
arbiter-master, manage_sub_realms=1
scheduler-master, manage_sub_realms=1
poller-master, manage_sub_realms=0
reactionner-master, manage_sub_realms=1
broker-master, manage_sub_realms=0
broker-B-world, manage_sub_realms=1
receiver-master, manage_sub_realms=1
One sub-realm brokers for the realm World : ok
One "not sub-realm" broker for the default realm, should not disturb !
On "not sub-realm" poller for the default realm, I should not have any poller
for the sub realms !
:return: None
'''
pass
def test_sub_realms_multi_levels(self):
''' Test realm / sub-realm / sub-sub-realms...
Realms:
World (default)
+ Asia
++ Japan
+++ Tokyo
+++ Osaka
+ Europe
++ Italy
+++ Torino
+++ Roma
++ France
+++ Paris
+++ Lyon
World2 (also references Asia as a sub-realm)
Satellites (declared):
arbiter-master, manage_sub_realms=1
scheduler-master, manage_sub_realms=1
poller-master, manage_sub_realms=1
reactionner-master, manage_sub_realms=1
broker-master, manage_sub_realms=1
receiver-master, manage_sub_realms=1
broker-france, manage_sub_realms=0 -> realm France
scheduler-france, manage_sub_realms=0 -> realm France
TODO: this test raises some error logs because of missing schedulers but the
configuration is accepted and looks correct! Note that this configuration is
a bit complicated and ambiguous!!!
:return: None
'''
pass
def test_sub_realms_multi_levels_loop(self):
''' Test realm / sub-realm / sub-sub-realms... with a loop, so exit with error message
:return: None
'''
pass
| 15 | 14 | 58 | 10 | 36 | 13 | 4 | 0.35 | 1 | 2 | 0 | 0 | 14 | 0 | 14 | 69 | 833 | 148 | 508 | 76 | 493 | 179 | 470 | 76 | 455 | 19 | 2 | 2 | 54 |
4,093 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_resultmodulation.py
|
tests.test_resultmodulation.TestResultModulation
|
class TestResultModulation(AlignakTest):
def setUp(self):
super(TestResultModulation, self).setUp()
self.setup_with_file('cfg/cfg_result_modulation.cfg',
dispatching=True)
assert self.conf_is_correct
# Our scheduler
self._sched = self._scheduler
def test_service_resultmodulation(self):
""" Test result modulations """
# Get the host
host = self._sched.hosts.find_by_name("test_host_0")
assert host is not None
host.checks_in_progress = []
host.act_depend_of = []
assert len(host.resultmodulations) == 0
# Get the host modulated service
svc = self._sched.services.find_srv_by_name_and_hostname("test_host_0",
"test_ok_0_resmod")
assert svc is not None
svc.checks_in_progress = []
svc.act_depend_of = []
assert len(svc.resultmodulations) == 1
# Get the result modulations
mod = self._sched.resultmodulations.find_by_name("critical_is_warning")
assert mod is not None
assert mod.get_name() == "critical_is_warning"
assert mod.is_active(self._sched.timeperiods)
assert mod.uuid in svc.resultmodulations
# The host is UP
# The service is going CRITICAL/HARD ...
self.scheduler_loop(3, [
[host, 0, 'UP'],
[svc, 2, 'BAD | value1=0 value2=0']
])
# The service has a result modulation. So CRITICAL is transformed as WARNING.
self.assertEqual('WARNING', svc.state)
self.assertEqual('HARD', svc.state_type)
# Even after a second run
self.scheduler_loop(3, [
[host, 0, 'UP'],
[svc, 2, 'BAD | value1=0 value2=0']
])
# The service has a result modulation. So CRITICAL is transformed as WARNING.
self.assertEqual('WARNING', svc.state)
self.assertEqual('HARD', svc.state_type)
# Without the resultmodulations, we should have the usual behavior
svc.resultmodulations = []
self.scheduler_loop(3, [
[host, 0, 'UP'],
[svc, 2, 'BAD | value1=0 value2=0']
])
self.assertEqual('CRITICAL', svc.state)
self.assertEqual('HARD', svc.state_type)
def test_inherited_modulation(self):
""" Test inherited host/service result modulations
Resultmodulation is a implicit inherited parameter and router defines it,
but not test_router_0_resmod/test_ok_0_resmod.
Despite this service should also be impacted
"""
# Get the router
router = self._sched.hosts.find_by_name("test_router_0_resmod")
router.checks_in_progress = []
router.act_depend_of = []
assert router is not None
assert len(router.resultmodulations) == 1
# Get the service
svc2 = self._sched.services.find_srv_by_name_and_hostname("test_router_0_resmod",
"test_ok_0_resmod")
assert svc2 is not None
svc2.checks_in_progress = []
svc2.act_depend_of = []
assert len(svc2.resultmodulations) == 1
assert router.resultmodulations == svc2.resultmodulations
# Get the result modulations
mod = self._sched.resultmodulations.find_by_name("critical_is_warning")
assert mod is not None
assert mod.get_name() == "critical_is_warning"
assert mod.is_active(self._sched.timeperiods)
assert mod.uuid in svc2.resultmodulations
# The router is UP
# The service is going CRITICAL/HARD ...
self.scheduler_loop(3, [
[router, 0, 'UP'],
[svc2, 2, 'BAD | value1=0 value2=0']
])
# The service has a result modulation. So CRITICAL is transformed as WARNING.
self.assertEqual('WARNING', svc2.state)
self.assertEqual('HARD', svc2.state_type)
|
class TestResultModulation(AlignakTest):
def setUp(self):
pass
def test_service_resultmodulation(self):
''' Test result modulations '''
pass
def test_inherited_modulation(self):
''' Test inherited host/service result modulations
Resultmodulation is a implicit inherited parameter and router defines it,
but not test_router_0_resmod/test_ok_0_resmod.
Despite this service should also be impacted
'''
pass
| 4 | 2 | 33 | 3 | 22 | 7 | 1 | 0.33 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 58 | 101 | 12 | 67 | 11 | 63 | 22 | 52 | 11 | 48 | 1 | 2 | 0 | 3 |
4,094 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_retention.py
|
tests.test_retention.TestRetention
|
class TestRetention(AlignakTest):
"""
This class test retention
"""
def setUp(self):
super(TestRetention, self).setUp()
def test_default_is_disabled_retention(self):
""" Test that default configuration is retention disabled
:return: None
"""
self.setup_with_file('cfg/cfg_default.cfg',
dispatching=True)
# Default configuration has no retention configured
assert self._arbiter.conf.retention_update_interval == 0
expected_logs = [
]
self.check_monitoring_events_log(expected_logs)
def test_retention_enabled(self):
""" Test that when retention is enabled whe have a log
:return: None
"""
self.setup_with_file('cfg/cfg_default_retention.cfg',
dispatching=True)
# Default configuration has no retention configured
assert self._arbiter.conf.retention_update_interval == 5
self.scheduler_loop(5, [])
time.sleep(1)
expected_logs = [
('info', 'RETENTION LOAD: scheduler-master'),
# ('info', 'CURRENT HOST STATE: test_router_0;UP;HARD;1;UP and OK'),
# ('error', 'CURRENT HOST STATE: test_host_0;DOWN;HARD;3;DOWN!'),
# ('warning', 'CURRENT SERVICE STATE: test_host_0;test_ok_0;UNREACHABLE;HARD;0;'),
# ('info', 'HOST DOWNTIME ALERT: test_host_0;STOPPED; Host has exited from a period of scheduled downtime'),
# ('info', 'HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMEEND (DOWN);1;notify-host;DOWN!'),
]
self.check_monitoring_events_log(expected_logs, assert_length=False)
def test_scheduler_retention(self):
""" Test save and restore retention data
:return: None
"""
# Delete a potential existing retention file...
if os.path.exists('/tmp/alignak/retention/test_host_0.json'):
os.remove('/tmp/alignak/retention/test_host_0.json')
if os.path.exists('/tmp/alignak/retention/test_router_0.json'):
os.remove('/tmp/alignak/retention/test_router_0.json')
self.setup_with_file('cfg/cfg_default_retention.cfg',
dispatching=True)
self.show_logs()
# Retention is configured
assert self._scheduler.pushed_conf.retention_update_interval == 5
# assert self._scheduler.pushed_conf.tick_update_retention == 5
router = self._scheduler.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.event_handler_enabled = False
host = self._scheduler.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
host.event_handler_enabled = False
svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(1, [[router, 0, 'UP and OK']])
self.scheduler_loop(3, [[host, 2, 'DOWN!']])
time.sleep(1.0)
self.scheduler_loop(1)
# time.sleep(1.0)
# self.scheduler_loop(1)
assert host.state_type == 'HARD'
assert host.state == 'DOWN'
assert svc.state_type == 'HARD'
assert svc.state == 'UNREACHABLE' # Because it is impacted by its host which is DOWN
now = int(time.time())
# Scheduler a downtime for the host
excmd = '[%d] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;test_contact;My downtime' \
% (now, now, now + 1200)
self._scheduler.run_external_commands([excmd])
self.external_command_loop()
time.sleep(1.0)
expected_logs = [
("info", "RETENTION LOAD: scheduler-master"),
('info', 'CURRENT HOST STATE: test_router_0;UP;HARD;0;'),
('info', 'CURRENT HOST STATE: test_host_0;UP;HARD;0;'),
('info', 'CURRENT SERVICE STATE: test_host_0;test_ok_0;OK;HARD;0;'),
("info", "ACTIVE HOST CHECK: test_router_0;UP;0;UP and OK"),
("error", "ACTIVE HOST CHECK: test_host_0;DOWN;0;DOWN!"),
("error", "HOST ALERT: test_host_0;DOWN;SOFT;1;DOWN!"),
("error", "ACTIVE HOST CHECK: test_host_0;DOWN;1;DOWN!"),
("error", "HOST ALERT: test_host_0;DOWN;SOFT;2;DOWN!"),
("error", "ACTIVE HOST CHECK: test_host_0;DOWN;2;DOWN!"),
("error", "HOST ALERT: test_host_0;DOWN;HARD;3;DOWN!"),
("error", "HOST NOTIFICATION: test_contact;test_host_0;DOWN;1;notify-host;DOWN!"),
("info", "EXTERNAL COMMAND: [%s] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;test_contact;My downtime" % (now, now, now + 1200)),
("info", "HOST DOWNTIME ALERT: test_host_0;STARTED; Host has entered a period of scheduled downtime"),
("info", "HOST ACKNOWLEDGE ALERT: test_host_0;STARTED; Host problem has been acknowledged"),
("info", "SERVICE ACKNOWLEDGE ALERT: test_host_0;test_ok_0;STARTED; Service problem has been acknowledged"),
("info", "HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMESTART (DOWN);1;notify-host;DOWN!")
]
self.check_monitoring_events_log(expected_logs)
# The host is DOWN and downtimed...
host_notifications = []
print("Notifications:")
for notification_uuid, notification in host.notifications_in_progress.items():
print("- %s" % (notification))
host_notifications.append(notification)
# Master notification
# Notification for host problem
# Notification for host downtime
assert 3 == len(host.notifications_in_progress)
host_comments = []
host_comment_id = None
print("Comments (host):")
for comm_uuid, comment in host.comments.items():
print("- %s" % (comment))
host_comments.append(comment.comment)
if comment.entry_type == 4:
host_comment_id = comment.uuid
# Comment for host problem
# Comment for host ack because of the downtime
assert 2 == len(host.comments)
service_comments = []
service_comment_id = None
print("Comments (service):")
for comm_uuid, comment in svc.comments.items():
print("- %s" % (comment))
service_comments.append(comment.comment)
if comment.entry_type == 4:
service_comment_id = comment.uuid
# Comment for svc ack because of the host downtime
assert 1 == len(svc.comments)
assert True == host.problem_has_been_acknowledged
assert host.acknowledgement.__dict__ == {
'uuid': host.acknowledgement.uuid,
'author': 'Alignak',
'comment': 'Acknowledged because of an host downtime',
'ref': host.uuid,
'sticky': True,
'end_time': 0,
'notify': 1,
"comment_id": host_comment_id
}
assert True == svc.problem_has_been_acknowledged
assert svc.acknowledgement.__dict__ == {
"uuid": svc.acknowledgement.uuid,
"author": "Alignak",
"comment": "Acknowledged because of an host downtime",
"ref": svc.uuid,
"sticky": False,
"end_time": 0,
"notify": True,
"comment_id": service_comment_id
}
# Prepare retention data to be stored
retention = self._scheduler.get_retention_data()
assert 'hosts' in retention
assert 'services' in retention
print('Hosts retention:')
for host_name in retention['hosts']:
print('- %s' % host_name)
assert len(retention['hosts']) == 2
print('Services retention:')
for host_name, service_description in retention['services']:
print('- %s - %s' % (host_name, service_description))
assert len(retention['services']) == 1
print('Services retention:')
for service in retention['services']:
print('- %s / %s' % (service[0], service[1]))
assert len(retention['services']) == 1
# Test if it can be JSON dumped (serialization...)
print('Hosts retention:')
for host_name in retention['hosts']:
try:
t = json.dumps(retention['hosts'][host_name])
except TypeError as exp:
assert True, "Some set() property makes JSON not directly possible!"
# Make the json dump possible thanks to an helper dump function
try:
t = json.dumps(retention['hosts'][host_name],
default=default_serialize)
except Exception as err:
assert False, 'Json dumps impossible: %s' % err
assert "notifications_in_progress" in t
assert "downtimes" in t
assert "acknowledgement" in t
for service in retention['services']:
try:
t = json.dumps(retention['services'][service])
except Exception as err:
assert False, 'Json dumps impossible: %s' % str(err)
assert "notifications_in_progress" in t
assert "downtimes" in t
assert "acknowledgement" in t
self._scheduler.update_recurrent_works_tick({'tick_update_retention': 1})
# Test that after get retention nothing got broken
self.scheduler_loop(1, [[host, 2, 'DOWN!']])
time.sleep(0.1)
expected_logs += [
("info", "RETENTION SAVE: scheduler-master"),
("error", "ACTIVE HOST CHECK: test_host_0;DOWN;3;DOWN!"),
]
self.check_monitoring_events_log(expected_logs)
self.show_logs()
assert os.path.exists('/tmp/alignak/retention/test_host_0.json')
with open('/tmp/alignak/retention/test_host_0.json', "r") as fd:
retention_check = json.load(fd)
pprint.pprint(retention_check)
assert 'name' in retention_check
assert retention_check['name'] == 'test_host_0'
assert os.path.exists('/tmp/alignak/retention/test_router_0.json')
# ************** test the restoration of retention ************** #
# new conf
self.setup_with_file('cfg/cfg_default_retention.cfg',
dispatching=True)
expected_logs = [
('info', 'RETENTION LOAD: scheduler-master'),
]
self.check_monitoring_events_log(expected_logs)
hostn = self._scheduler.hosts.find_by_name("test_host_0")
hostn.checks_in_progress = []
hostn.act_depend_of = [] # ignore the router
hostn.event_handler_enabled = False
svcn = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svcn.checks_in_progress = []
svcn.act_depend_of = [] # no hostchecks on critical checkresults
# Host and service should be restored with the former states
assert hostn.state_type == 'HARD'
assert hostn.state == 'DOWN'
assert svcn.state_type == 'HARD'
assert svcn.state == 'UNREACHABLE'
assert hostn.last_state == 'DOWN'
assert svcn.last_state == 'PENDING' # Never tested!
# Not the same identifier
assert host.uuid != hostn.uuid
self.scheduler_loop(1, [[hostn, 0, 'UP'], [svcn, 1, 'WARNING']])
time.sleep(1.0)
self.scheduler_loop(1)
expected_logs += [
('info', 'CURRENT HOST STATE: test_router_0;UP;HARD;1;UP and OK'),
('error', 'CURRENT HOST STATE: test_host_0;DOWN;HARD;3;DOWN!'),
('warning', 'CURRENT SERVICE STATE: test_host_0;test_ok_0;UNREACHABLE;HARD;0;'),
('warning', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;WARNING;0;WARNING'),
('info', 'SERVICE ACKNOWLEDGE ALERT: test_host_0;test_ok_0;EXPIRED; Service problem acknowledge expired'),
('warning', 'SERVICE ALERT: test_host_0;test_ok_0;WARNING;HARD;0;WARNING'),
('warning', 'SERVICE EVENT HANDLER: test_host_0;test_ok_0;WARNING;HARD;0;eventhandler'),
('info', 'ACTIVE HOST CHECK: test_host_0;UP;3;UP'),
('info', 'HOST ACKNOWLEDGE ALERT: test_host_0;EXPIRED; Host problem acknowledge expired'),
('info', 'HOST ALERT: test_host_0;UP;HARD;3;UP'),
]
self.check_monitoring_events_log(expected_logs)
assert 2 == len(hostn.comments)
assert 2 == len(hostn.notifications_in_progress)
# check downtimes (only for host and not for service)
print("Host downtimes: ")
for downtime in host.downtimes:
print('- %s' % (downtime))
print("HostN downtimes: ")
for downtime in hostn.downtimes:
print('- %s' % (downtime))
assert list(host.downtimes) == list(hostn.downtimes)
for down_uuid, downtime in hostn.downtimes.items():
assert 'My downtime' == downtime.comment
# check notifications
print("Host notifications: ")
for notif_uuid in host.notifications_in_progress:
print('- %s / %s' % (notif_uuid, host.notifications_in_progress[notif_uuid]))
print("HostN notifications: ")
for notif_uuid in hostn.notifications_in_progress:
print('- %s / %s' % (notif_uuid, hostn.notifications_in_progress[notif_uuid]))
for notif_uuid, notification in hostn.notifications_in_progress.items():
print(notif_uuid, notification)
if notif_uuid not in host.notifications_in_progress:
continue
assert host.notifications_in_progress[notif_uuid].command == \
notification.command
assert host.notifications_in_progress[notif_uuid].t_to_go == \
notification.t_to_go
# Notifications: host ack, service ack, host downtime
assert 2 == len(hostn.notifications_in_progress)
# check comments for host
assert len(host.comments) == len(hostn.comments)
commentshn = []
for comm_uuid, comment in hostn.comments.items():
commentshn.append(comment.comment)
# Compare sorted comments because dictionairies are not ordered
assert sorted(host_comments) == sorted(commentshn)
# check comments for service
assert len(svc.comments) == len(svcn.comments)
commentssn = []
for comm_uuid, comment in svcn.comments.items():
commentssn.append(comment.comment)
assert service_comments == commentssn
# check notified_contacts
assert isinstance(hostn.notified_contacts, list)
assert isinstance(svcn.notified_contacts, list)
assert [self._scheduler.contacts.find_by_name("test_contact").uuid] == \
hostn.notified_contacts_ids
|
class TestRetention(AlignakTest):
'''
This class test retention
'''
def setUp(self):
pass
def test_default_is_disabled_retention(self):
''' Test that default configuration is retention disabled
:return: None
'''
pass
def test_retention_enabled(self):
''' Test that when retention is enabled whe have a log
:return: None
'''
pass
def test_scheduler_retention(self):
''' Test save and restore retention data
:return: None
'''
pass
| 5 | 4 | 83 | 11 | 62 | 13 | 7 | 0.21 | 1 | 6 | 0 | 0 | 4 | 0 | 4 | 59 | 341 | 46 | 248 | 36 | 243 | 53 | 184 | 33 | 179 | 25 | 2 | 2 | 28 |
4,095 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_satellite_link.py
|
tests.test_satellite_link.Test_ArbiterLink_get_name
|
class Test_ArbiterLink_get_name(template_DaemonLink_get_name, AlignakTest):
"""Test satellite link arbiter"""
def setUp(self):
super(Test_ArbiterLink_get_name, self).setUp()
daemon_link = ArbiterLink
|
class Test_ArbiterLink_get_name(template_DaemonLink_get_name, AlignakTest):
'''Test satellite link arbiter'''
def setUp(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 58 | 6 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
4,096 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_satellite_link.py
|
tests.test_satellite_link.Test_BrokerLink_get_name
|
class Test_BrokerLink_get_name(template_DaemonLink_get_name, AlignakTest):
"""Test satellite link broker"""
def setUp(self):
super(Test_BrokerLink_get_name, self).setUp()
daemon_link = BrokerLink
|
class Test_BrokerLink_get_name(template_DaemonLink_get_name, AlignakTest):
'''Test satellite link broker'''
def setUp(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 58 | 6 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
4,097 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/tests/test_satellite_link.py
|
tests.test_satellite_link.Test_PollerLink_get_name
|
class Test_PollerLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link poller"""
    def setUp(self):
        super(Test_PollerLink_get_name, self).setUp()
    # Daemon link class exercised by the inherited template tests
    # NOTE(review): read as a class attribute (metrics report no instance
    # variables) - confirm it is not meant to be local to setUp
    daemon_link = PollerLink
|
class Test_PollerLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link poller (skeleton)"""
    def setUp(self):
        # Skeleton placeholder: the original implementation calls super().setUp()
        pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 2 | 1 | 0 | 0 | 1 | 0 | 1 | 58 | 6 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
4,098 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/pollerlink.py
|
alignak.objects.pollerlink.PollerLink
|
class PollerLink(SatelliteLink):
    """
    Class to manage the link between Arbiter and Poller. With this, an arbiter
    can communicate with a poller
    """
    my_type = 'poller'
    # Items of this type are named through their 'poller_name' property
    my_name_property = "%s_name" % my_type
    # To_send: send or not to satellite conf
    properties = SatelliteLink.properties.copy()
    properties.update({
        'type':
            StringProp(default=u'poller', fill_brok=[FULL_STATUS], to_send=True),
        'poller_name':
            StringProp(default='', fill_brok=[FULL_STATUS]),
        # Default TCP port a poller daemon listens on
        'port':
            IntegerProp(default=7771, fill_brok=[FULL_STATUS], to_send=True),
        # NOTE(review): presumably restricts which tagged checks this poller
        # may take - confirm against the SatelliteLink/scheduler documentation
        'poller_tags':
            ListProp(default=['None'], to_send=True),
    })
|
class PollerLink(SatelliteLink):
    '''
    Class to manage the link between Arbiter and Poller. With this, an arbiter
    can communicate with a poller (skeleton)
    '''
    pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.36 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 20 | 1 | 14 | 4 | 13 | 5 | 5 | 4 | 4 | 0 | 4 | 0 | 0 |
4,099 |
Alignak-monitoring/alignak
|
Alignak-monitoring_alignak/alignak/objects/item.py
|
alignak.objects.item.Items
|
class Items(object):
"""
Class to manage all Item objects of the same type
"""
inner_class = Item
    def __init__(self, items, index_items=True, parsing=True):
        """Create an items list.

        With `parsing` True, `items` is an iterable of freshly parsed
        configuration objects that are added (and optionally indexed)
        through `add_items`.

        With `parsing` False, `items` is the JSON string produced by
        `serialize`: it is decoded and its 'items' / 'templates' entries
        are un-serialized back into this list.

        :param items: objects to store, or their JSON serialization
        :type items: list | str
        :param index_items: whether real items get indexed by name
        :type index_items: bool
        :param parsing: True when parsing a configuration, False to un-serialize
        :type parsing: bool
        """
        # uuid -> object containers and their name indexes
        self.items = {}
        self.name_to_item = {}
        self.templates = {}
        self.name_to_template = {}
        # Assuming a default correct configuration
        self.conf_is_correct = True
        self.configuration_warnings = []
        self.configuration_errors = []
        if parsing:
            # We are creating new items from the configuration
            self.add_items(items, index_items)
            return
        # We are un-serializing
        logger.debug("Un-serializing: %s", self.__class__)
        try:
            items = json.loads(items)
        except Exception as exp:
            # Best effort: log the decoding failure and keep going
            logger.error("Items unserialize exception: %s", exp)
            print("Items unserialize exception: %s" % exp)
        if 'items' in items:
            for uuid in items['items']:
                _item = unserialize(items['items'][uuid], no_json=True)
                self.add_item(_item)
        if 'templates' in items:
            for uuid in items['templates']:
                _tpl = unserialize(items['templates'][uuid], no_json=True)
                self.add_template(_tpl)
        logger.debug("Un-serialized: %d items and %d templates",
                     len(self.items), len(self.templates))
def __repr__(self): # pragma: no cover
# Build a sorted list of the 10 first elements name, this to make it easier to compare ;)
dump_list = sorted([str(item.get_full_name()
if isinstance(item, Item) else item) for item in self])[:10]
if self.templates:
return "<%s, %d templates, %d elements: %s.../>" \
% (self.__class__.__name__, len(self.templates), len(self.items), dump_list)
return "<%s, %d elements: %s/>" % (self.__class__.__name__, len(self.items), dump_list)
__str__ = __repr__
    def __iter__(self):
        # Iterate over the items only (templates excluded); the list copy
        # protects against dict mutation during iteration
        return iter(list(self.items.values()))
    def __len__(self):
        # Count the real items only, not the templates
        return len(self.items)
def __delitem__(self, key):
try:
self.unindex_item(self.items[key])
del self.items[key]
except KeyError: # we don't want it, we do not have it. All is perfect
pass
    def __setitem__(self, key, value):
        # Store the item under its uuid and index it by name immediately
        self.items[key] = value
        self.index_item(value)
def __getitem__(self, key):
"""Get a specific objects for Items dict.
Ie : a host in the Hosts dict, a service in the Service dict etc.
:param key: object uuid
:type key: str
:return: The wanted object
:rtype: alignak.object.item.Item
"""
return self.items[key] if key else None
    def __contains__(self, key):
        # Membership test is on the items uuids only (templates excluded)
        return key in self.items
def add_error(self, error_message):
"""Add a message in the configuration errors list so we can print them
all in one place
Set the object configuration as not correct
:param error_message: error message
:type error_message: str
:return: None
"""
if not hasattr(self, 'configuration_errors'):
self.configuration_errors = []
if isinstance(error_message, list):
self.configuration_errors += error_message
else:
self.configuration_errors.append(error_message)
self.conf_is_correct = False
def add_warning(self, warning_message):
"""Add a message in the configuration warnings list so we can print them
all in one place
:param warning_message: warning message
:type warning_message: str
:return: None
"""
if not hasattr(self, 'configuration_warnings'):
self.configuration_warnings = []
if isinstance(warning_message, list):
self.configuration_warnings += warning_message
else:
self.configuration_warnings.append(warning_message)
def add_items(self, items, index_items):
"""
Add items to the templates list if it is a template, else add in the items list
:param items: items list to add
:type items: alignak.objects.item.Items
:param index_items: Flag indicating if the items should be indexed on the fly.
:type index_items: bool
:return: None
"""
count_templates = 0
count_items = 0
generated_items = []
for item in items:
if item.is_a_template():
self.add_template(item)
count_templates += 1
else:
new_items = self.add_item(item, index_items)
count_items += max(1, len(new_items))
if new_items:
generated_items.extend(new_items)
if count_templates:
logger.info(' indexed %d template(s)', count_templates)
if count_items:
logger.info(' created %d %s(s).', count_items, self.inner_class.my_type)
    def manage_conflict(self, item, name):
        """
        Check if an object holding the same name already exists in the index.

        If so, compare their definition orders: the lowest definition order
        is kept. On equal definition orders, the lastly defined object wins
        and a warning is raised.

        The method returns the item that should be added after it has decided
        which one should be kept.

        If the new item has precedence over the existing one, the
        existing is removed for the new to replace it.

        :param item: object to check for conflict
        :type item: alignak.objects.item.Item
        :param name: name of the object
        :type name: str
        :return: 'item' parameter modified
        :rtype: object
        """
        existing = self.name_to_template[name] if item.is_a_template() else self.name_to_item[name]
        if existing == item:
            return item
        # Get (and set) the definition order if any else the default values
        existing_prio = getattr(existing, "definition_order",
                                existing.properties["definition_order"].default)
        existing.definition_order = existing_prio
        item_prio = getattr(item, "definition_order",
                            item.properties["definition_order"].default)
        item.definition_order = item_prio
        if existing_prio < item_prio:
            # Existing item has lower priority, so it has precedence.
            logger.info("naming conflict, I kept the existing object: %s, "
                        "definition order: %d, imported from: %s and I rejected: %s, "
                        "definition order: %d, imported from: %s",
                        existing.get_name(), existing.definition_order, existing.imported_from,
                        item.get_name(), item.definition_order, item.imported_from)
            return existing
        if existing_prio > item_prio:
            # New item has lower priority, so it has precedence.
            # Existing item will be deleted below
            logger.info("naming conflict, I kept the most recent object: %s, "
                        "definition order: %d, imported from: %s and I rejected the existing: %s, "
                        "definition order: %d, imported from: %s",
                        item.get_name(), item.definition_order, item.imported_from,
                        existing.get_name(), existing.definition_order, existing.imported_from)
        else:
            # Don't know which one to keep, lastly defined has precedence
            objcls = getattr(self.inner_class, "my_type", "[unknown]")
            item.add_warning("duplicate %s '%s', from: '%s' and '%s', using lastly defined. "
                             "You may manually set the definition_order parameter to avoid "
                             "this message."
                             % (objcls, name, item.imported_from, existing.imported_from))
        # The new item replaces the existing one: drop the existing object
        if item.is_a_template():
            self.remove_template(existing)
        else:
            self.remove_item(existing)
        return item
def add_template(self, template):
"""
Add and index a template into the `templates` container.
:param template: The template to add
:type template: alignak.objects.item.Item
:return: None
"""
template = self.index_template(template)
self.templates[template.uuid] = template
    def index_template(self, template):
        """
        Index a template by `name` into the `name_to_template` dictionary.

        If an object holding the same item's name/key already exists in the index
        then the conflict is managed by the `manage_conflict` method.

        Note that a nameless template raises an error but is still indexed,
        under the empty name.

        :param template: The template to index
        :type template: alignak.objects.item.Item
        :return: the template that ends up indexed (may be the existing one)
        :rtype: alignak.objects.item.Item
        """
        name = template.get_name(index=True)
        if not name:
            template.add_error("a %s template has been defined without name, from: %s"
                               % (self.inner_class.my_type, template.imported_from))
        elif name in self.name_to_template:
            template = self.manage_conflict(template, name)
        self.name_to_template[name] = template
        logger.debug("Indexed a %s template: %s, using: %s",
                     template.my_type, name, getattr(template, 'use', 'Nothing'))
        return template
    def remove_template(self, template):
        """
        Remove and un-index a template from the `templates` container.

        :param template: The template to remove
        :type template: alignak.objects.item.Item
        :return: None
        """
        logger.debug("Removing %s template: %s", template.my_type, template.get_name())
        self.unindex_template(template)
        # pop() with default: no error if the template was never stored
        self.templates.pop(template.uuid, None)
def unindex_template(self, template):
"""
Unindex a template from the `templates` container.
:param template: The template to un-index
:type template: alignak.objects.item.Item
:return: None
"""
try:
del self.name_to_template[template.get_name(index=True)]
except KeyError: # pragma: no cover, simple protection
pass
    def add_item(self, item, index=True):  # pylint: disable=too-many-locals, too-many-branches
        """
        Add an item into our containers, and index it depending on the `index` flag.

        An item whose name contains a `[min-max]` (or `[min-max-format]`)
        pattern is expanded into several cloned hosts, one per value of the
        range; the clones are stored instead of the original item.

        :param item: object to add
        :type item: alignak.objects.item.Item
        :param index: Flag indicating if the item should be indexed
        :type index: bool
        :return: the new items created
        :rtype: list
        """
        # name_property = getattr(self.__class__.inner_class, "my_name_property", None)
        # if not name_property:  # pragma: no cover, never called
        #     raise "Missing my_name_property in class: %s" % self.__class__.inner_class
        #
        # todo: make a specific inherited method for the hosts... else allow for all objects ;)
        # Check if some hosts are to be self-generated...
        generated_hosts = []
        # name = getattr(item, name_property, None)
        name = item.get_name(index=True)
        if name and '[' in name and ']' in name:
            # We can create several objects from the same configuration!
            pattern = name[name.find("[")+1:name.find("]")]
            if '-' in pattern:
                logger.debug("Found an host with a patterned name: %s", pattern)
                # pattern is format-min-max
                # format is optional
                limits = pattern.split('-')
                fmt = "%d"
                min_v = 1
                max_v = 1
                if len(limits) == 3:
                    fmt = limits[2]
                    new_name = name.replace('[%s-%s-%s]' % (limits[0], limits[1], fmt), '***')
                else:
                    new_name = name.replace('[%s-%s]' % (limits[0], limits[1]), '***')
                # Non-numeric bounds silently fall back to the 1..1 default
                try:
                    min_v = int(limits[0])
                except ValueError:
                    pass
                try:
                    max_v = int(limits[1])
                except ValueError:
                    pass
                for idx in range(min_v, max_v + 1):
                    logger.debug("- cloning host: %s", new_name.replace('***', fmt % idx))
                    new_host = deepcopy(item)
                    # Each clone needs its own uuid
                    new_host.uuid = get_a_new_object_id()
                    new_host.host_name = new_name.replace('***', fmt % idx)
                    # Update some fields with the newly generated host name
                    for prop in ['display_name', 'alias', 'notes', 'notes_url', 'action_url']:
                        if getattr(new_host, prop, None) is None:
                            continue
                        value = getattr(new_host, prop)
                        if '$HOSTNAME$' in value:
                            setattr(new_host, prop, value.replace('$HOSTNAME$',
                                                                  new_host.host_name))
                    generated_hosts.append(new_host)
        if generated_hosts:
            # Store the clones instead of the original patterned item
            for new_host in generated_hosts:
                if index is True:
                    new_host = self.index_item(new_host)
                self.items[new_host.uuid] = new_host
            logger.info(" cloned %d hosts from %s", len(generated_hosts), item.get_name())
        else:
            if index is True:
                item = self.index_item(item)
            self.items[item.uuid] = item
        return generated_hosts
    def remove_item(self, item):
        """
        Remove (and un-index) an item from the items container.

        :param item: object to remove
        :type item: alignak.objects.item.Item
        :return: None
        """
        logger.debug("Removing %s item: %s", item.my_type, item.get_name())
        self.unindex_item(item)
        # pop() with default: no error if the item was never stored
        self.items.pop(item.uuid, None)
def index_item(self, item):
"""
Index an item into our `name_to_item` dictionary.
If an object holding the same item's name/key already exists in the index
then the conflict is managed by the `manage_conflict` method.
:param item: item to index
:type item: alignak.objects.item.Item
:return: item modified
:rtype: object
"""
name_property = getattr(self.__class__.inner_class, "my_name_property", None)
if not name_property: # pragma: no cover, never called
raise "Missing my_name_property in class: %s" % self.__class__.inner_class
name = item.get_name(index=True)
if not name:
item.add_error("a %s item has been defined without %s, from: %s"
% (self.inner_class.my_type, name_property, item.imported_from))
elif name in self.name_to_item:
# An item is still defined with this name
item = self.manage_conflict(item, name)
self.name_to_item[name] = item
return item
def unindex_item(self, item):
"""
Un-index an item from our name_to_item dict.
:param item: the item to un-index
:type item: alignak.objects.item.Item
:return: None
"""
name_property = getattr(self.__class__.inner_class, "my_name_property", None)
if not name_property: # pragma: no cover, never called
raise "Missing my_name_property in class: %s" % self.__class__.inner_class
name = getattr(item, name_property, None)
if not name:
return
self.name_to_item.pop(name, None)
def find_by_name(self, name, template=False):
"""
Find an item by name
:param template: True to search in the templates
:type template: bool
:param name: name of the item (or template)
:type name: str
:return: item
:rtype: alignak.objects.item.Item
"""
if template:
return self.name_to_template.get(name, None)
return self.name_to_item.get(name, None)
def get_all_tags(self, item):
"""
Recursively get all tags of an item.
The list of tags is the list of the names of the templates
which the item is inheriting from.
:param item: an item
:type item: Item
:return: list of tags
:rtype: list
"""
# Get the item templates name list
all_tags = item.get_templates()
for template_id in item.templates:
template = self.templates[template_id]
all_tags.append(template.name)
all_tags.extend(self.get_all_tags(template))
return list(set(all_tags))
def linkify_item_templates(self, item):
"""
Link items with their templates
:param item: an item
:type item: alignak.objects.item.Item
:return: None
"""
item.templates = []
for template_name in item.get_templates():
template = self.find_by_name(template_name, template=True)
if not template:
item.add_warning("use/inherit from an unknown template: %s; from: %s"
% (template_name, item.imported_from))
continue
if template is item:
# Raise a log because templates are not fully managing error messages!
# item.add_error("use/inherits from itself; from: %s" % item.imported_from)
self.conf_is_correct = False
self.add_error("[%s::%s] use/inherits from itself; from: %s"
% (item.my_type, item.get_name(), item.imported_from))
logger.error("[%s::%s] use/inherits from itself; from: %s",
item.my_type, item.get_name(), item.imported_from)
continue
if template.uuid not in item.templates:
item.templates.append(template.uuid)
def linkify_templates(self):
"""
Link all templates, and create the template graph too
:return: None
"""
# First we link the templates each other
for item in list(self.templates.values()):
self.linkify_item_templates(item)
# Then we link items with their templates
for item in list(self.items.values()):
self.linkify_item_templates(item)
# And we set the templates names list as tags in the items
for item in self:
item.tags = self.get_all_tags(item)
def is_correct(self):
"""
Check if the items list configuration is correct ::
* check if duplicate items exist in the list and warn about this
* set alias and display_name property for each item in the list if they do not exist
* check each item in the list
* log all previous warnings
* log all previous errors
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
# Better check individual items before displaying the global items list errors and warnings
for item in self:
if not item.is_correct():
self.conf_is_correct = False
item.add_error("Configuration is incorrect; from: %s" % item.imported_from)
if item.configuration_errors:
self.add_error(item.configuration_errors)
if item.configuration_warnings:
self.add_warning(item.configuration_warnings)
# Raise all previous warnings
for msg in self.configuration_warnings:
logger.warning(msg)
# Raise all previous errors
for msg in self.configuration_errors:
logger.error(msg)
return self.conf_is_correct
    def remove_templates(self):
        """
        Delete the templates container attribute

        :return: None
        """
        del self.templates
def clean(self):
"""
Clean the items and templates list
:return: None
"""
for item_uuid in self.items:
self.items[item_uuid].clean()
for item_uuid in self.templates:
self.templates[item_uuid].clean()
    def fill_default(self):
        """
        Set the default value on each item property that is not yet defined

        :return: None
        """
        for item in self:
            item.fill_default()
    def serialize(self, no_json=True, printing=False):
        """This function serializes items and their templates into a simple dict
        object. It is used when transferring data to other daemons over the
        network (http)

        Here is the generic function that simply serializes each item of the items
        list and each item of the templates list

        Once we get all the items in the list, the whole result is JSON serialized
        and returned to the caller.

        :param no_json: forwarded to each item serialization - if True the nested
                        items are serialized as dicts, else as json strings
        :type no_json: bool
        :param printing: if True, console prints some information to help debugging
        :type printing: bool
        :return: JSON string of the items and templates dicts
        :rtype: str
        """
        res = {
            'items': {},
            'templates': {}
        }
        for item_uuid in self.items:
            res['items'][item_uuid] = serialize(self.items[item_uuid],
                                                no_json=no_json, printing=printing)
        for item_uuid in self.templates:
            res['templates'][item_uuid] = serialize(self.templates[item_uuid],
                                                    no_json=no_json, printing=printing)
        try:
            # default_serialize handles the types the json encoder does not know
            res = json.dumps(res, ensure_ascii=False, indent=None,
                             separators=(', ', ': '), default=default_serialize)
        except Exception as exp:
            # Best effort: on failure, the un-dumped dict is returned as-is
            logger.error("Items serialize exception: %s", exp)
            print("Items serialize exception: %s" % exp)
        return res
def apply_partial_inheritance(self, prop):
"""
Define property with inheritance value of the property
:param prop: property
:type prop: str
:return: None
"""
# todo: itertools.chain ? Why not doing two loops: items and then templates...
for item in itertools.chain(iter(list(self.items.values())),
iter(list(self.templates.values()))):
self.get_property_by_inheritance(item, prop)
# If a "null" attribute was inherited, delete it
if getattr(item, prop, None) == 'null':
delattr(item, prop)
def apply_inheritance(self):
"""
For all items and templates inherit properties and custom variables.
:return: None
"""
# We check for all Class properties if the host has it
# if not, it check all host templates for a value
for prop in self.inner_class.properties:
self.apply_partial_inheritance(prop)
# todo: itertools.chain ? Why not doing two loops: items and then templates...
for item in itertools.chain(iter(list(self.items.values())),
iter(list(self.templates.values()))):
self.get_customs_properties_by_inheritance(item)
def linkify_with_contacts(self, contacts):
"""
Link items with contacts items
:param contacts: all contacts object
:type contacts: alignak.objects.contact.Contacts
:return: None
"""
for i in self:
if not hasattr(i, 'contacts'):
continue
links_list = strip_and_uniq(i.contacts)
new = []
for name in [e for e in links_list if e]:
contact = contacts.find_by_name(name)
if contact is not None and contact.uuid not in new:
new.append(contact.uuid)
else:
i.add_error("the contact '%s' defined for '%s' is unknown"
% (name, i.get_name()))
i.contacts = new
def linkify_with_escalations(self, escalations):
"""
Link with escalations
:param escalations: all escalations object
:type escalations: alignak.objects.escalation.Escalations
:return: None
"""
for i in self:
if not hasattr(i, 'escalations'):
continue
links_list = strip_and_uniq(i.escalations)
new = []
for name in [e for e in links_list if e]:
escalation = escalations.find_by_name(name)
if escalation is not None and escalation.uuid not in new:
new.append(escalation.uuid)
else:
i.add_error("the escalation '%s' defined for '%s' is unknown"
% (name, i.get_name()))
i.escalations = new
def linkify_with_result_modulations(self, resultmodulations):
"""
Link items with resultmodulations items
:param resultmodulations: all resultmodulations object
:type resultmodulations: alignak.resultmodulation.Resultmodulations
:return: None
"""
for i in self:
if not hasattr(i, 'resultmodulations'):
continue
links_list = strip_and_uniq(i.resultmodulations)
new = []
for name in [e for e in links_list if e]:
modulation = resultmodulations.find_by_name(name)
if modulation is not None and modulation.uuid not in new:
new.append(modulation.uuid)
else:
i.add_error("the result modulation '%s' defined on the %s "
"'%s' do not exist" % (name, i.__class__.my_type, i.get_name()))
i.resultmodulations = new
def linkify_with_business_impact_modulations(self, business_impact_modulations):
"""
Link items with business impact objects
:param business_impact_modulations: all business impacts object
:type business_impact_modulations: alignak.objects.businessmodulation.Businessmodulations
:return: None
"""
for i in self:
if not hasattr(i, 'business_impact_modulations'):
continue
links_list = strip_and_uniq(i.business_impact_modulations)
new = []
for name in [e for e in links_list if e]:
modulation = business_impact_modulations.find_by_name(name)
if modulation is not None and modulation.uuid not in new:
new.append(modulation.uuid)
else:
i.add_error("the business impact modulation '%s' "
"defined on the %s '%s' do not exist"
% (name, i.__class__.my_type, i.get_name()))
new.append(modulation.uuid)
i.business_impact_modulations = new
@staticmethod
def explode_contact_groups_into_contacts(item, contactgroups):
"""
Get all contacts of contact_groups and put them in contacts container
:param item: item where have contact_groups property
:type item: object
:param contactgroups: all contactgroups object
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
"""
if not hasattr(item, 'contact_groups'):
return
# TODO : See if we can remove this if
cg_names = ''
if item.contact_groups:
if isinstance(item.contact_groups, list):
cg_names = item.contact_groups
else:
cg_names = item.contact_groups.split(',')
cg_names = strip_and_uniq(cg_names)
for cg_name in cg_names:
if not contactgroups.find_by_name(cg_name):
item.add_error("The contact group '%s' defined for the %s '%s' do not exist"
% (cg_name, item.__class__.my_type, item.get_name()))
continue
cnames = contactgroups.get_members_of_group(cg_name)
# We add contacts into our contacts
if cnames:
if hasattr(item, 'contacts'):
# Fix #1054 - bad contact explosion
# item.contacts.extend(cnames)
item.contacts = item.contacts + cnames
else:
item.contacts = cnames
def linkify_with_timeperiods(self, timeperiods, prop):
"""
Link items with timeperiods items
:param timeperiods: all timeperiods object
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param prop: property name
:type prop: str
:return: None
"""
for i in self:
if not hasattr(i, prop):
continue
tpname = getattr(i, prop).strip()
# some default values are '', so set None
if not tpname:
setattr(i, prop, '')
continue
# Ok, get a real name, search for it
timeperiod = timeperiods.find_by_name(tpname)
if timeperiod is None:
i.add_error("The %s named '%s' is unknown" % (prop, tpname))
continue
setattr(i, prop, timeperiod.uuid)
def linkify_with_check_modulations(self, checkmodulations):
"""
Link checkmodulation object
:param checkmodulations: checkmodulations object
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:return: None
"""
for i in self:
if not hasattr(i, 'checkmodulations'):
continue
links_list = strip_and_uniq(i.checkmodulations)
new = []
for name in [e for e in links_list if e]:
modulation = checkmodulations.find_by_name(name)
if modulation is not None and modulation.uuid not in new:
new.append(modulation.uuid)
else:
i.add_error("The checkmodulations of the %s '%s' named "
"'%s' is unknown" % (i.__class__.my_type, i.get_name(), name))
i.checkmodulations = new
def linkify_with_macro_modulations(self, macromodulations):
"""
Link macromodulations
:param macromodulations: macromodulations object
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:return: None
"""
for i in self:
if not hasattr(i, 'macromodulations'):
continue
links_list = strip_and_uniq(i.macromodulations)
new = []
for name in [e for e in links_list if e]:
modulation = macromodulations.find_by_name(name)
if modulation is not None and modulation.uuid not in new:
new.append(modulation.uuid)
else:
i.add_error("The macromodulations of the %s '%s' named "
"'%s' is unknown" % (i.__class__.my_type, i.get_name(), name))
i.macromodulations = new
@staticmethod
def evaluate_hostgroup_expression(expr, hosts, hostgroups, look_in='hostgroups'):
"""
Evaluate hostgroup expression
:param expr: an expression
:type expr: str
:param hosts: hosts object (all hosts)
:type hosts: alignak.objects.host.Hosts
:param hostgroups: hostgroups object (all hostgroups)
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param look_in: item name where search
:type look_in: str
:return: return list of hostgroups
:rtype: list
"""
# Maybe exp is a list, like numerous hostgroups entries in a service, link them
if isinstance(expr, list):
expr = '|'.join(expr)
if look_in == 'hostgroups':
node = ComplexExpressionFactory(look_in, hostgroups, hosts)
else: # templates
node = ComplexExpressionFactory(look_in, hosts, hosts)
expr_tree = node.eval_cor_pattern(expr)
set_res = expr_tree.resolve_elements()
# HOOK DBG
return list(set_res)
@staticmethod
def get_hosts_from_hostgroups(hgname, hostgroups):
"""
Get hosts of hostgroups
:param hgname: hostgroup name
:type hgname: str
:param hostgroups: hostgroups object (all hostgroups)
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts
:rtype: list
"""
if not isinstance(hgname, list):
hgname = [e.strip() for e in hgname.split(',') if e.strip()]
host_names = []
for name in hgname:
hostgroup = hostgroups.find_by_name(name)
if hostgroup is None:
raise ValueError("the hostgroup '%s' is unknown" % hgname)
mbrs = [h.strip() for h in hostgroup.get_hosts() if h.strip()]
host_names.extend(mbrs)
return host_names
    def explode_host_groups_into_hosts(self, item, hosts, hostgroups):
        """
        Get all hosts of the item's hostgroups and add them to its
        host_name property (comma separated string).

        A '!host' entry removes that host, a '*' entry adds every known
        host. NOTE: a '!host' only removes a host that was accumulated
        earlier in the list - the result is order dependent.

        :param item: the item object
        :type item: alignak.objects.item.Item
        :param hosts: hosts object
        :type hosts: alignak.objects.host.Hosts
        :param hostgroups: hostgroups object
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :return: None
        """
        hnames_list = []
        # Gets item's hostgroup_name
        hgnames = getattr(item, "hostgroup_name", '') or ''
        # Defines if hostgroup is a complex expression
        # Expands hostgroups
        if is_complex_expr(hgnames):
            hnames_list.extend(self.evaluate_hostgroup_expression(
                item.hostgroup_name, hosts, hostgroups))
        elif hgnames:
            try:
                hnames_list.extend(
                    self.get_hosts_from_hostgroups(hgnames, hostgroups))
            except ValueError as err:  # pragma: no cover, simple protection
                item.add_error(str(err))
        # Expands host names
        hname = getattr(item, "host_name", '')
        hnames_list.extend([n.strip() for n in hname.split(',') if n.strip()])
        hnames = set()
        for host in hnames_list:
            # If the host start with a !, it's to be removed from
            # the hostgroup get list
            if host.startswith('!'):
                hst_to_remove = host[1:].strip()
                try:
                    hnames.remove(hst_to_remove)
                except KeyError:
                    # Not accumulated (yet): nothing to remove
                    pass
            elif host == '*':
                # Every host that has a non-empty host_name
                hnames.update([host.host_name for host
                               in hosts.items.values() if getattr(host, 'host_name', '')])
            # Else it's a host to add, but maybe it's ALL
            else:
                hnames.add(host)
        item.host_name = ','.join(hnames)
    def no_loop_in_parents(self, attr1, attr2):
        # pylint: disable=too-many-branches
        """
        Find loops in the dependencies.

        For now, used with the following attributes :
        :(self, parents):
            host dependencies from host object
        :(host_name, dependent_host_name):\
            host dependencies from hostdependencies object
        :(service_description, dependent_service_description):
            service dependencies from servicedependencies object

        :param attr1: attribute name ("self" means: use the item uuid)
        :type attr1: str
        :param attr2: attribute name
        :type attr2: str
        :return: the elements participating in a dependency loop
        :rtype: list
        """
        # Ok, we say "from now, no loop :) "
        # in_loop = []
        # Create parent graph
        parents = Graph()
        # Start with all items as nodes
        for item in self:
            # Hack to get self here. Used when looping on host and host parent's
            if attr1 == "self":
                obj = item.uuid  # obj is a host/service [list]
            else:
                obj = getattr(item, attr1, None)
            if obj is not None:
                if isinstance(obj, list):
                    for sobj in obj:
                        parents.add_node(sobj)
                else:
                    parents.add_node(obj)
        # And now fill edges
        # Both ends may be scalars or lists: add one edge per combination
        # pylint: disable=too-many-nested-blocks
        for item in self:
            if attr1 == "self":
                obj1 = item.uuid
            else:
                obj1 = getattr(item, attr1, None)
            obj2 = getattr(item, attr2, None)
            if obj2 is not None:
                if isinstance(obj2, list):
                    for sobj2 in obj2:
                        if isinstance(obj1, list):
                            for sobj1 in obj1:
                                parents.add_edge(sobj1, sobj2)
                        else:
                            parents.add_edge(obj1, sobj2)
                else:
                    if isinstance(obj1, list):
                        for sobj1 in obj1:
                            parents.add_edge(sobj1, obj2)
                    else:
                        parents.add_edge(obj1, obj2)
        return parents.loop_check()
def get_property_by_inheritance(self, obj, prop):
    # pylint: disable=too-many-branches, too-many-nested-blocks
    """
    Get the property asked in parameter for this object or from its templates.

    Resolution order: the object's own value first, then the values of its
    templates (depth-first, recursively). Additive inheritance is handled
    through the '+' marker: a list value whose first element is '+' means
    "extend the inherited value" rather than replace it. Template objects keep
    the leading '+' so their own children can continue the chain; concrete
    (non-template) objects get the marker stripped from the final,
    de-duplicated value.

    todo: rewrite this function which is really too complex!

    :param obj: the object to search the property
    :type obj: alignak.objects.item.Item
    :param prop: name of property
    :type prop: str
    :return: Value of property of this object or of a template
    :rtype: str or None
    """
    if prop == 'register':
        # We do not inherit the register property
        return None

    # If I have the property, I take mine but I check if I must add a plus property
    if hasattr(obj, prop):
        value = getattr(obj, prop)
        # Manage the additive inheritance for the property,
        # if property is in plus, add or replace it
        # Template should keep the '+' at the beginning of the chain
        if obj.has_plus(prop):
            if not isinstance(value, list):
                value = [value]
            value.insert(0, obj.get_plus_and_delete(prop))
            # NOTE: list(set(...)) drops the ordering; the "unique ordered
            # list" pass below rebuilds a deterministic order afterwards
            value = list(set(value))
            if obj.is_a_template():
                value.insert(0, '+')

        # Clean the returned value
        if isinstance(value, list):
            # Get unique ordered list
            new_list = []
            for elt in value:
                if elt not in new_list:
                    new_list.append(elt)
            value = new_list
            # Only a concrete (non-template) object drops the '+' markers
            if not obj.is_a_template():
                while '+' in value:
                    value.remove('+')
        return value

    # Ok, I do not have prop, Maybe my templates do?
    # Same story for plus
    # We reverse list, so that when looking for properties by inheritance,
    # the least defined template wins (if property is set).
    for t_id in getattr(obj, 'templates', []):
        template = self.templates[t_id]
        value = self.get_property_by_inheritance(template, prop)

        if value is None or (isinstance(value, list) and not value):
            continue

        # If our template give us a '+' value, we continue the loop
        still_loop = False
        if isinstance(value, list) and value[0] == '+':
            # Templates should keep their + inherited from their parents
            if not obj.is_a_template():
                value = list(value)
                value = value[1:]
            still_loop = True

        # Maybe in the previous loop, we set a value, use it too
        if hasattr(obj, prop):
            # If the current value is a string, it will simplify the problem
            if isinstance(value, (list, string_types)) and value and value[0] == '+':
                # In this case we can remove the + from our current
                # tpl because our value will be final
                new_val = list(getattr(obj, prop))
                new_val.extend(value[1:])
                value = new_val
            else:  # If not, we should keep the + sign of need
                new_val = list(getattr(obj, prop))
                new_val.extend(value)
                value = new_val

        # Ok, we can set it and uniquify a list if needed
        if isinstance(value, list):
            # Get unique ordered list
            new_list = []
            for elt in value:
                if elt not in new_list:
                    new_list.append(elt)
            value = new_list

            if not obj.is_a_template():
                while '+' in value:
                    value.remove('+')
        setattr(obj, prop, value)

        # If we only got some '+' values, we must still loop
        # for an end value without it
        if not still_loop:
            # And set my own value in the end if need
            if obj.has_plus(prop):
                value = list(value)
                value.extend(obj.get_plus_and_delete(prop))
                # Template should keep their '+'
                if obj.is_a_template() and value[0] != '+':
                    value.insert(0, '+')

                # Clean the returned value
                if isinstance(value, list):
                    # Get unique ordered list
                    new_list = []
                    for elt in value:
                        if elt not in new_list:
                            new_list.append(elt)
                    value = new_list

                    if not obj.is_a_template():
                        while '+' in value:
                            value.remove('+')
                setattr(obj, prop, value)
            return value

    # Maybe templates only give us + values, so we didn't quit, but we already got a
    # self.prop value after all
    template_with_only_plus = hasattr(obj, prop)

    # I do not have an ending prop, my templates too... Maybe a plus?
    # warning: if all my templates gave me '+' values, do not forget to
    # add the already set self.prop value
    if obj.has_plus(prop):
        if template_with_only_plus:
            value = list(getattr(obj, prop))
            value.extend(obj.get_plus_and_delete(prop))
        else:
            value = obj.get_plus_and_delete(prop)
        # Template should keep their '+' chain
        # We must say it's a '+' value, so our son will know that it must continue looping
        if obj.is_a_template() and value != [] and value[0] != '+':
            value.insert(0, '+')

        # Clean the returned value
        if isinstance(value, list):
            # Get unique ordered list
            new_list = []
            for elt in value:
                if elt not in new_list:
                    new_list.append(elt)
            value = new_list

            if not obj.is_a_template():
                while '+' in value:
                    value.remove('+')
        setattr(obj, prop, value)
        return value

    # Ok so in the end, we give the value we got if we have one, or None
    # Not even a plus... so None :)
    return getattr(obj, prop, None)
def get_customs_properties_by_inheritance(self, obj):
    """
    Resolve the custom variables (customs) of an item through its templates.

    Recursively collects the customs defined by each template of the object
    and merges them into ``obj.customs``: a custom explicitly defined on the
    object always wins over an inherited one. Pending '+' additive values are
    prepended to the corresponding custom, and customs that only ever appeared
    as '+' values are finally folded into the result as-is.

    :param obj: the object to search the property
    :type obj: alignak.objects.item.Item
    :return: the resolved customs dictionary of the object
    :rtype: dict
    """
    for tpl_uuid in getattr(obj, 'templates', []):
        tpl = self.templates[tpl_uuid]
        inherited = self.get_customs_properties_by_inheritance(tpl)
        if not inherited:
            continue
        for cust_name in inherited:
            # A custom defined on the object takes precedence over the
            # inherited one
            merged = obj.customs[cust_name] if cust_name in obj.customs \
                else inherited[cust_name]
            if obj.has_plus(cust_name):
                merged.insert(0, obj.get_plus_and_delete(cust_name))
            obj.customs[cust_name] = merged

    # Apply any pending '+' value to the customs the object already holds
    for cust_name in obj.customs:
        current = obj.customs[cust_name]
        if obj.has_plus(cust_name):
            current.insert(0, obj.get_plus_and_delete(cust_name))
        obj.customs[cust_name] = current

    # Customs that only exist as '+' entries become plain customs
    plus_only = obj.get_all_plus_and_delete()
    for cust_name in plus_only:
        obj.customs[cust_name] = plus_only[cust_name]

    return obj.customs
|
class Items(object):
    """
    Class to manage all Item objects of the same type.

    All method bodies below are placeholders (``pass``) — this class is a
    skeleton of the real container API (items, templates, and their name
    indexes).
    """

    def __init__(self, items, index_items=True, parsing=True):
        """Build the container from an initial list of items.

        :param items: initial items to store
        :param index_items: Flag indicating if the items should be indexed
            on the fly
        :type index_items: bool
        :param parsing: True when building during configuration parsing
        :type parsing: bool
        """
        pass

    def __repr__(self):
        """Return a short human-readable representation of the container."""
        pass

    def __iter__(self):
        """Iterate over the stored items."""
        pass

    def __len__(self):
        """Return the number of stored items."""
        pass

    def __delitem__(self, key):
        """Remove the item stored under ``key``."""
        pass

    def __setitem__(self, key, value):
        """Store ``value`` under ``key``."""
        pass

    def __getitem__(self, key):
        """Get a specific objects for Items dict.
        Ie : a host in the Hosts dict, a service in the Service dict etc.

        :param key: object uuid
        :type key: str
        :return: The wanted object
        :rtype: alignak.object.item.Item
        """
        pass

    def __contains__(self, key):
        """Return True if ``key`` identifies a stored item."""
        pass

    def add_error(self, error_message):
        """Add a message in the configuration errors list so we can print them
        all in one place

        Set the object configuration as not correct

        :param error_message: error message
        :type error_message: str
        :return: None
        """
        pass

    def add_warning(self, warning_message):
        """Add a message in the configuration warnings list so we can print them
        all in one place

        :param warning_message: warning message
        :type warning_message: str
        :return: None
        """
        pass

    def add_items(self, items, index_items):
        """
        Add items to the templates list if it is a template, else add in the items list

        :param items: items list to add
        :type items: alignak.objects.item.Items
        :param index_items: Flag indicating if the items should be indexed on the fly.
        :type index_items: bool
        :return: None
        """
        pass

    def manage_conflict(self, item, name):
        """
        Checks if an object holding the same name already exists in the index.

        If so, it compares their definition order: the lowest definition order
        is kept. If definition order equals, an error is risen.

        The method returns the item that should be added after it has decided
        which one should be kept.

        If the new item has precedence over the New existing one, the
        existing is removed for the new to replace it.

        :param item: object to check for conflict
        :type item: alignak.objects.item.Item
        :param name: name of the object
        :type name: str
        :return: 'item' parameter modified
        :rtype: object
        """
        pass

    def add_template(self, template):
        """
        Add and index a template into the `templates` container.

        :param template: The template to add
        :type template: alignak.objects.item.Item
        :return: None
        """
        pass

    def index_template(self, template):
        """
        Indexes a template by `name` into the `name_to_template` dictionary.

        If an object holding the same item's name/key already exists in the index
        then the conflict is managed by the `manage_conflict` method.

        :param template: The template to index
        :type template: alignak.objects.item.Item
        :return: None
        """
        pass

    def remove_template(self, template):
        """
        Removes and un-index a template from the `templates` container.

        :param template: The template to remove
        :type template: alignak.objects.item.Item
        :return: None
        """
        pass

    def unindex_template(self, template):
        """
        Unindex a template from the `templates` container.

        :param template: The template to un-index
        :type template: alignak.objects.item.Item
        :return: None
        """
        pass

    # NOTE(review): this method was a second `add_items` definition that
    # silently shadowed the one above; its own docstring (single item +
    # `index` flag) shows it is the per-item variant, hence `add_item`.
    def add_item(self, item, index=True):
        """
        Add an item into our containers, and index it depending on the `index` flag.

        :param item: object to add
        :type item: alignak.objects.item.Item
        :param index: Flag indicating if the item should be indexed
        :type index: bool
        :return: the new items created
        :rtype: list
        """
        pass

    def remove_item(self, item):
        """
        Remove (and un-index) an object

        :param item: object to remove
        :type item: alignak.objects.item.Item
        :return: None
        """
        pass

    def index_item(self, item):
        """
        Index an item into our `name_to_item` dictionary.

        If an object holding the same item's name/key already exists in the index
        then the conflict is managed by the `manage_conflict` method.

        :param item: item to index
        :type item: alignak.objects.item.Item
        :return: item modified
        :rtype: object
        """
        pass

    def unindex_item(self, item):
        """
        Un-index an item from our name_to_item dict.

        :param item: the item to un-index
        :type item: alignak.objects.item.Item
        :return: None
        """
        pass

    def find_by_name(self, name, template=False):
        """
        Find an item by name

        :param template: True to search in the templates
        :type template: bool
        :param name: name of the item (or template)
        :type name: str
        :return: item
        :rtype: alignak.objects.item.Item
        """
        pass

    def get_all_tags(self, item):
        """
        Recursively get all tags of an item.

        The list of tags is the list of the names of the templates
        which the item is inheriting from.

        :param item: an item
        :type item: Item
        :return: list of tags
        :rtype: list
        """
        pass

    def linkify_item_templates(self, item):
        """
        Link items with their templates

        :param item: an item
        :type item: alignak.objects.item.Item
        :return: None
        """
        pass

    def linkify_templates(self):
        """
        Link all templates, and create the template graph too

        :return: None
        """
        pass

    def is_correct(self):
        """
        Check if the items list configuration is correct ::

        * check if duplicate items exist in the list and warn about this
        * set alias and display_name property for each item in the list if they do not exist
        * check each item in the list
        * log all previous warnings
        * log all previous errors

        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        """
        pass

    def remove_templates(self):
        """
        Remove templates

        :return: None
        """
        pass

    def clean(self):
        """
        Clean the items and templates list

        :return: None
        """
        pass

    def fill_default(self):
        """
        Define properties for each items with default value when not defined

        :return: None
        """
        pass

    def serialize(self, no_json=True, printing=False):
        """This function serializes items and their templates into a simple dict
        object. It is used when transferring data to other daemons over the
        network (http)

        Here is the generic function that simply serializes each item of the items
        list and each item of the templates list

        Once we get all the items in the list, the whole result is JSON serialized
        and returned to the caller.

        :param no_json: if True return dict, otherwise return a json
        :type no_json: bool
        :param printing: if True, console prints some information to help debugging
        :type printing: bool
        :return: JSON lists of items and templates
        :rtype: str
        """
        pass

    def apply_partial_inheritance(self, prop):
        """
        Define property with inheritance value of the property

        :param prop: property
        :type prop: str
        :return: None
        """
        pass

    def apply_inheritance(self):
        """
        For all items and templates inherit properties and custom variables.

        :return: None
        """
        pass

    def linkify_with_contacts(self, contacts):
        """
        Link items with contacts items

        :param contacts: all contacts object
        :type contacts: alignak.objects.contact.Contacts
        :return: None
        """
        pass

    def linkify_with_escalations(self, escalations):
        """
        Link with escalations

        :param escalations: all escalations object
        :type escalations: alignak.objects.escalation.Escalations
        :return: None
        """
        pass

    def linkify_with_result_modulations(self, resultmodulations):
        """
        Link items with resultmodulations items

        :param resultmodulations: all resultmodulations object
        :type resultmodulations: alignak.resultmodulation.Resultmodulations
        :return: None
        """
        pass

    def linkify_with_business_impact_modulations(self, business_impact_modulations):
        """
        Link items with business impact objects

        :param business_impact_modulations: all business impacts object
        :type business_impact_modulations: alignak.objects.businessmodulation.Businessmodulations
        :return: None
        """
        pass

    @staticmethod
    def explode_contact_groups_into_contacts(item, contactgroups):
        """
        Get all contacts of contact_groups and put them in contacts container

        :param item: item where have contact_groups property
        :type item: object
        :param contactgroups: all contactgroups object
        :type contactgroups: alignak.objects.contactgroup.Contactgroups
        :return: None
        """
        pass

    def linkify_with_timeperiods(self, timeperiods, prop):
        """
        Link items with timeperiods items

        :param timeperiods: all timeperiods object
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param prop: property name
        :type prop: str
        :return: None
        """
        pass

    def linkify_with_check_modulations(self, checkmodulations):
        """
        Link checkmodulation object

        :param checkmodulations: checkmodulations object
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :return: None
        """
        pass

    def linkify_with_macro_modulations(self, macromodulations):
        """
        Link macromodulations

        :param macromodulations: macromodulations object
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :return: None
        """
        pass

    @staticmethod
    def evaluate_hostgroup_expression(expr, hosts, hostgroups, look_in='hostgroups'):
        """
        Evaluate hostgroup expression

        :param expr: an expression
        :type expr: str
        :param hosts: hosts object (all hosts)
        :type hosts: alignak.objects.host.Hosts
        :param hostgroups: hostgroups object (all hostgroups)
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :param look_in: item name where search
        :type look_in: str
        :return: return list of hostgroups
        :rtype: list
        """
        pass

    @staticmethod
    def get_hosts_from_hostgroups(hgname, hostgroups):
        """
        Get hosts of hostgroups

        :param hgname: hostgroup name
        :type hgname: str
        :param hostgroups: hostgroups object (all hostgroups)
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :return: list of hosts
        :rtype: list
        """
        pass

    def explode_host_groups_into_hosts(self, item, hosts, hostgroups):
        """
        Get all hosts of hostgroups and add all in host_name container

        :param item: the item object
        :type item: alignak.objects.item.Item
        :param hosts: hosts object
        :type hosts: alignak.objects.host.Hosts
        :param hostgroups: hostgroups object
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :return: None
        """
        pass

    def no_loop_in_parents(self, attr1, attr2):
        """
        Find loop in dependencies.

        For now, used with the following attributes :
        :(self, parents):
            host dependencies from host object
        :(host_name, dependent_host_name): host dependencies from hostdependencies object
        :(service_description, dependent_service_description):
            service dependencies from servicedependencies object

        :param attr1: attribute name
        :type attr1: str
        :param attr2: attribute name
        :type attr2: str
        :return: list
        :rtype: list
        """
        pass

    def get_property_by_inheritance(self, obj, prop):
        """
        Get the property asked in parameter to this object or from defined templates of this
        object

        todo: rewrite this function which is really too complex!

        :param obj: the object to search the property
        :type obj: alignak.objects.item.Item
        :param prop: name of property
        :type prop: str
        :return: Value of property of this object or of a template
        :rtype: str or None
        """
        pass

    def get_customs_properties_by_inheritance(self, obj):
        """
        Get custom properties from the templates defined in this object

        :param obj: the object to search the property
        :type obj: alignak.objects.item.Item
        :return: list of custom properties
        :rtype: list
        """
        pass
| 49 | 39 | 25 | 3 | 14 | 9 | 5 | 0.65 | 1 | 12 | 3 | 16 | 42 | 7 | 45 | 45 | 1,193 | 176 | 622 | 177 | 573 | 405 | 547 | 171 | 501 | 38 | 1 | 6 | 221 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.