id
int64
0
843k
repository_name
stringlengths
7
55
file_path
stringlengths
9
332
class_name
stringlengths
3
290
human_written_code
stringlengths
12
4.36M
class_skeleton
stringlengths
19
2.2M
total_program_units
int64
1
9.57k
total_doc_str
int64
0
4.2k
AvgCountLine
float64
0
7.89k
AvgCountLineBlank
float64
0
300
AvgCountLineCode
float64
0
7.89k
AvgCountLineComment
float64
0
7.89k
AvgCyclomatic
float64
0
130
CommentToCodeRatio
float64
0
176
CountClassBase
float64
0
48
CountClassCoupled
float64
0
589
CountClassCoupledModified
float64
0
581
CountClassDerived
float64
0
5.37k
CountDeclInstanceMethod
float64
0
4.2k
CountDeclInstanceVariable
float64
0
299
CountDeclMethod
float64
0
4.2k
CountDeclMethodAll
float64
0
4.2k
CountLine
float64
1
115k
CountLineBlank
float64
0
9.01k
CountLineCode
float64
0
94.4k
CountLineCodeDecl
float64
0
46.1k
CountLineCodeExe
float64
0
91.3k
CountLineComment
float64
0
27k
CountStmt
float64
1
93.2k
CountStmtDecl
float64
0
46.1k
CountStmtExe
float64
0
90.2k
MaxCyclomatic
float64
0
759
MaxInheritanceTree
float64
0
16
MaxNesting
float64
0
34
SumCyclomatic
float64
0
6k
3,900
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/external_command.py
alignak.external_command.ExternalCommandManager
class ExternalCommandManager(object): """ExternalCommandManager manages all external commands sent to Alignak. It basically parses arguments and executes the right function """ commands = { 'change_contact_modsattr': {'global': True, 'args': ['contact', None]}, 'change_contact_modhattr': {'global': True, 'args': ['contact', None]}, 'change_contact_modattr': {'global': True, 'args': ['contact', None]}, 'change_contact_host_notification_timeperiod': {'global': True, 'args': ['contact', 'time_period']}, 'add_svc_comment': {'global': False, 'args': ['service', 'obsolete', 'author', None]}, 'add_host_comment': {'global': False, 'args': ['host', 'obsolete', 'author', None]}, 'acknowledge_svc_problem': {'global': False, 'args': ['service', 'to_int', 'to_bool', 'obsolete', 'author', None]}, 'acknowledge_host_problem': {'global': False, 'args': ['host', 'to_int', 'to_bool', 'obsolete', 'author', None]}, 'acknowledge_svc_problem_expire': {'global': False, 'args': ['service', 'to_int', 'to_bool', 'obsolete', 'to_int', 'author', None]}, 'acknowledge_host_problem_expire': {'global': False, 'args': ['host', 'to_int', 'to_bool', 'obsolete', 'to_int', 'author', None]}, 'change_contact_svc_notification_timeperiod': {'global': True, 'args': ['contact', 'time_period']}, 'change_custom_contact_var': {'global': True, 'args': ['contact', None, None]}, 'change_custom_host_var': {'global': False, 'args': ['host', None, None]}, 'change_custom_svc_var': {'global': False, 'args': ['service', None, None]}, 'change_global_host_event_handler': {'global': True, 'args': ['command']}, 'change_global_svc_event_handler': {'global': True, 'args': ['command']}, 'change_host_check_command': {'global': False, 'args': ['host', 'command']}, 'change_host_check_timeperiod': {'global': False, 'args': ['host', 'time_period']}, 'change_host_event_handler': {'global': False, 'args': ['host', 'command']}, 'change_host_snapshot_command': {'global': False, 'args': ['host', 'command']}, 'change_host_modattr': 
{'global': False, 'args': ['host', 'to_int']}, 'change_max_host_check_attempts': {'global': False, 'args': ['host', 'to_int']}, 'change_max_svc_check_attempts': {'global': False, 'args': ['service', 'to_int']}, 'change_normal_host_check_interval': {'global': False, 'args': ['host', 'to_int']}, 'change_normal_svc_check_interval': {'global': False, 'args': ['service', 'to_int']}, 'change_retry_host_check_interval': {'global': False, 'args': ['host', 'to_int']}, 'change_retry_svc_check_interval': {'global': False, 'args': ['service', 'to_int']}, 'change_svc_check_command': {'global': False, 'args': ['service', 'command']}, 'change_svc_check_timeperiod': {'global': False, 'args': ['service', 'time_period']}, 'change_svc_event_handler': {'global': False, 'args': ['service', 'command']}, 'change_svc_snapshot_command': {'global': False, 'args': ['service', 'command']}, 'change_svc_modattr': {'global': False, 'args': ['service', 'to_int']}, 'change_svc_notification_timeperiod': {'global': False, 'args': ['service', 'time_period']}, 'delay_host_notification': {'global': False, 'args': ['host', 'to_int']}, 'delay_svc_notification': {'global': False, 'args': ['service', 'to_int']}, 'del_all_contact_downtimes': {'global': False, 'args': ['contact']}, 'del_all_host_comments': {'global': False, 'args': ['host']}, 'del_all_host_downtimes': {'global': False, 'args': ['host']}, 'del_all_svc_comments': {'global': False, 'args': ['service']}, 'del_all_svc_downtimes': {'global': False, 'args': ['service']}, 'del_contact_downtime': {'global': True, 'args': [None]}, 'del_host_comment': {'global': True, 'args': [None]}, 'del_host_downtime': {'global': True, 'args': [None]}, 'del_svc_comment': {'global': True, 'args': [None]}, 'del_svc_downtime': {'global': True, 'args': [None]}, 'disable_all_notifications_beyond_host': {'global': False, 'args': ['host']}, 'disable_contactgroup_host_notifications': {'global': True, 'args': ['contact_group']}, 'disable_contactgroup_svc_notifications': 
{'global': True, 'args': ['contact_group']}, 'disable_contact_host_notifications': {'global': True, 'args': ['contact']}, 'disable_contact_svc_notifications': {'global': True, 'args': ['contact']}, 'disable_event_handlers': {'global': True, 'args': []}, 'disable_failure_prediction': {'global': True, 'args': []}, 'disable_flap_detection': {'global': True, 'args': []}, 'disable_hostgroup_host_checks': {'global': True, 'args': ['host_group']}, 'disable_hostgroup_host_notifications': {'global': True, 'args': ['host_group']}, 'disable_hostgroup_passive_host_checks': {'global': True, 'args': ['host_group']}, 'disable_hostgroup_passive_svc_checks': {'global': True, 'args': ['host_group']}, 'disable_hostgroup_svc_checks': {'global': True, 'args': ['host_group']}, 'disable_hostgroup_svc_notifications': {'global': True, 'args': ['host_group']}, 'disable_host_and_child_notifications': {'global': False, 'args': ['host']}, 'disable_host_check': {'global': False, 'args': ['host']}, 'disable_host_event_handler': {'global': False, 'args': ['host']}, 'disable_host_flap_detection': {'global': False, 'args': ['host']}, 'disable_host_freshness_check': {'global': False, 'args': ['host']}, 'disable_host_freshness_checks': {'global': True, 'args': []}, 'disable_host_notifications': {'global': False, 'args': ['host']}, 'disable_host_svc_checks': {'global': False, 'args': ['host']}, 'disable_host_svc_notifications': {'global': False, 'args': ['host']}, 'disable_notifications': {'global': True, 'args': []}, 'disable_passive_host_checks': {'global': False, 'args': ['host']}, 'disable_passive_svc_checks': {'global': False, 'args': ['service']}, 'disable_performance_data': {'global': True, 'args': []}, 'disable_servicegroup_host_checks': {'global': True, 'args': ['service_group']}, 'disable_servicegroup_host_notifications': {'global': True, 'args': ['service_group']}, 'disable_servicegroup_passive_host_checks': {'global': True, 'args': ['service_group']}, 
'disable_servicegroup_passive_svc_checks': {'global': True, 'args': ['service_group']}, 'disable_servicegroup_svc_checks': {'global': True, 'args': ['service_group']}, 'disable_servicegroup_svc_notifications': {'global': True, 'args': ['service_group']}, 'disable_service_flap_detection': {'global': False, 'args': ['service']}, 'disable_service_freshness_checks': {'global': True, 'args': []}, 'disable_svc_check': {'global': False, 'args': ['service']}, 'disable_svc_event_handler': {'global': False, 'args': ['service']}, 'disable_svc_flap_detection': {'global': False, 'args': ['service']}, 'disable_svc_freshness_check': {'global': False, 'args': ['service']}, 'disable_svc_notifications': {'global': False, 'args': ['service']}, 'enable_all_notifications_beyond_host': {'global': False, 'args': ['host']}, 'enable_contactgroup_host_notifications': {'global': True, 'args': ['contact_group']}, 'enable_contactgroup_svc_notifications': {'global': True, 'args': ['contact_group']}, 'enable_contact_host_notifications': {'global': True, 'args': ['contact']}, 'enable_contact_svc_notifications': {'global': True, 'args': ['contact']}, 'enable_event_handlers': {'global': True, 'args': []}, 'enable_failure_prediction': {'global': True, 'args': []}, 'enable_flap_detection': {'global': True, 'args': []}, 'enable_hostgroup_host_checks': {'global': True, 'args': ['host_group']}, 'enable_hostgroup_host_notifications': {'global': True, 'args': ['host_group']}, 'enable_hostgroup_passive_host_checks': {'global': True, 'args': ['host_group']}, 'enable_hostgroup_passive_svc_checks': {'global': True, 'args': ['host_group']}, 'enable_hostgroup_svc_checks': {'global': True, 'args': ['host_group']}, 'enable_hostgroup_svc_notifications': {'global': True, 'args': ['host_group']}, 'enable_host_and_child_notifications': {'global': False, 'args': ['host']}, 'enable_host_check': {'global': False, 'args': ['host']}, 'enable_host_event_handler': {'global': False, 'args': ['host']}, 
'enable_host_flap_detection': {'global': False, 'args': ['host']}, 'enable_host_freshness_check': {'global': False, 'args': ['host']}, 'enable_host_freshness_checks': {'global': True, 'args': []}, 'enable_host_notifications': {'global': False, 'args': ['host']}, 'enable_host_svc_checks': {'global': False, 'args': ['host']}, 'enable_host_svc_notifications': {'global': False, 'args': ['host']}, 'enable_notifications': {'global': True, 'args': []}, 'enable_passive_host_checks': {'global': False, 'args': ['host']}, 'enable_passive_svc_checks': {'global': False, 'args': ['service']}, 'enable_performance_data': {'global': True, 'args': []}, 'enable_servicegroup_host_checks': {'global': True, 'args': ['service_group']}, 'enable_servicegroup_host_notifications': {'global': True, 'args': ['service_group']}, 'enable_servicegroup_passive_host_checks': {'global': True, 'args': ['service_group']}, 'enable_servicegroup_passive_svc_checks': {'global': True, 'args': ['service_group']}, 'enable_servicegroup_svc_checks': {'global': True, 'args': ['service_group']}, 'enable_servicegroup_svc_notifications': {'global': True, 'args': ['service_group']}, 'enable_service_freshness_checks': {'global': True, 'args': []}, 'enable_svc_check': {'global': False, 'args': ['service']}, 'enable_svc_event_handler': {'global': False, 'args': ['service']}, 'enable_svc_flap_detection': {'global': False, 'args': ['service']}, 'enable_svc_freshness_check': {'global': False, 'args': ['service']}, 'enable_svc_notifications': {'global': False, 'args': ['service']}, 'process_file': {'global': True, 'args': [None, 'to_bool']}, 'process_host_check_result': {'global': False, 'args': ['host', 'to_int', None]}, 'process_host_output': {'global': False, 'args': ['host', None]}, 'process_service_check_result': {'global': False, 'args': ['service', 'to_int', None]}, 'process_service_output': {'global': False, 'args': ['service', None]}, 'read_state_information': {'global': True, 'args': []}, 
'remove_host_acknowledgement': {'global': False, 'args': ['host']}, 'remove_svc_acknowledgement': {'global': False, 'args': ['service']}, 'restart_program': {'global': True, 'internal': True, 'args': []}, 'reload_config': {'global': True, 'internal': True, 'args': []}, 'save_state_information': {'global': True, 'args': []}, 'schedule_and_propagate_host_downtime': {'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool', 'to_int', 'to_int', 'author', None]}, 'schedule_and_propagate_triggered_host_downtime': {'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool', 'to_int', 'to_int', 'author', None]}, 'schedule_contact_downtime': {'global': True, 'args': ['contact', 'to_int', 'to_int', 'author', None]}, 'schedule_forced_host_check': {'global': False, 'args': ['host', 'to_int']}, 'schedule_forced_host_svc_checks': {'global': False, 'args': ['host', 'to_int']}, 'schedule_forced_svc_check': {'global': False, 'args': ['service', 'to_int']}, 'schedule_hostgroup_host_downtime': {'global': True, 'args': ['host_group', 'to_int', 'to_int', 'to_bool', None, 'to_int', 'author', None]}, 'schedule_hostgroup_svc_downtime': {'global': True, 'args': ['host_group', 'to_int', 'to_int', 'to_bool', None, 'to_int', 'author', None]}, 'schedule_host_check': {'global': False, 'args': ['host', 'to_int']}, 'schedule_host_downtime': {'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool', None, 'to_int', 'author', None]}, 'schedule_host_svc_checks': {'global': False, 'args': ['host', 'to_int']}, 'schedule_host_svc_downtime': {'global': False, 'args': ['host', 'to_int', 'to_int', 'to_bool', None, 'to_int', 'author', None]}, 'schedule_servicegroup_host_downtime': {'global': True, 'args': ['service_group', 'to_int', 'to_int', 'to_bool', None, 'to_int', 'author', None]}, 'schedule_servicegroup_svc_downtime': {'global': True, 'args': ['service_group', 'to_int', 'to_int', 'to_bool', None, 'to_int', 'author', None]}, 'schedule_svc_check': {'global': False, 'args': 
['service', 'to_int']}, 'schedule_svc_downtime': {'global': False, 'args': ['service', 'to_int', 'to_int', 'to_bool', None, 'to_int', 'author', None]}, 'send_custom_host_notification': {'global': False, 'args': ['host', 'to_int', 'author', None]}, 'send_custom_svc_notification': {'global': False, 'args': ['service', 'to_int', 'author', None]}, 'set_host_notification_number': {'global': False, 'args': ['host', 'to_int']}, 'set_svc_notification_number': {'global': False, 'args': ['service', 'to_int']}, 'shutdown_program': {'global': True, 'args': []}, 'start_accepting_passive_host_checks': {'global': True, 'args': []}, 'start_accepting_passive_svc_checks': {'global': True, 'args': []}, 'start_executing_host_checks': {'global': True, 'args': []}, 'start_executing_svc_checks': {'global': True, 'args': []}, 'stop_accepting_passive_host_checks': {'global': True, 'args': []}, 'stop_accepting_passive_svc_checks': {'global': True, 'args': []}, 'stop_executing_host_checks': {'global': True, 'args': []}, 'stop_executing_svc_checks': {'global': True, 'args': []}, 'launch_svc_event_handler': {'global': False, 'args': ['service']}, 'launch_host_event_handler': {'global': False, 'args': ['host']}, # Now internal calls 'add_simple_host_dependency': {'global': False, 'args': ['host', 'host']}, 'del_host_dependency': {'global': False, 'args': ['host', 'host']}, 'add_simple_poller': {'global': True, 'internal': True, 'args': [None, None, None, None]}, } def __init__(self, conf, mode, daemon, accept_unknown=False, log_external_commands=False): """ The command manager is initialized with a `mode` parameter specifying what is to be done with the managed commands. 
If mode is: - applyer, the user daemon is a scheduler that will execute the command - dispatcher, the user daemon only dispatches the command to an applyer - receiver, the user daemon only receives commands, analyses and then dispatches them to the schedulers Note that the daemon parameter is really a Daemon object except for the scheduler where it is a Scheduler object! If `accept_passive_unknown_check_results` is True, then a Brok will be created even if passive checks are received for unknown host/service else a Warning log will be emitted.. Note: the receiver mode has no configuration :param conf: current configuration :type conf: alignak.objects.Config :param mode: command manager mode :type mode: str :param daemon: :type daemon: alignak.Daemon :param accept_unknown: accept or not unknown passive checks results :type accept_unknown: bool """ self.daemon = daemon self.mode = mode # If we got a conf... if self.mode == 'receiver': self.my_conf = { 'schedulers': daemon.schedulers } else: self.my_conf = conf if conf: self.my_conf = conf self.hosts = conf.hosts self.services = conf.services self.contacts = conf.contacts self.hostgroups = conf.hostgroups self.commands = conf.commands self.servicegroups = conf.servicegroups self.contactgroups = conf.contactgroups self.timeperiods = conf.timeperiods self.cfg_parts = None if self.mode == 'dispatcher': self.cfg_parts = conf.parts self.accept_passive_unknown_check_results = accept_unknown self.log_external_commands = log_external_commands logger.debug("External command manager, log commands: %s, accept unknown check: %s", self.log_external_commands, self.accept_passive_unknown_check_results) # Will change for each command read, so if a command need it, # it can get it self.current_timestamp = 0 def send_an_element(self, element): """Send an element (Brok, Comment,...) 
to our daemon Use the daemon `add` function if it exists, else raise an error log :param element: elementto be sent :type: alignak.Brok, or Comment, or Downtime, ... :return: """ # Comment this log because it raises an encoding exception on Travis CI with python 2.7! # logger.debug("Sending to %s for %s", self.daemon, element) if hasattr(self.daemon, "add"): func = getattr(self.daemon, "add") if isinstance(func, collections.Callable): try: func(element) except Exception as exp: # pylint: disable=broad-except logger.critical("Daemon report exception: %s", exp) return logger.critical("External command or Brok could not be sent to any daemon!") def resolve_command(self, excmd): """Parse command and dispatch it (to schedulers for example) if necessary If the command is not global it will be executed. :param excmd: external command to handle :type excmd: alignak.external_command.ExternalCommand :return: result of command parsing. None for an invalid command. """ # Maybe the command is invalid. Bailout try: command = excmd.cmd_line except AttributeError as exp: # pragma: no cover, simple protection logger.warning("resolve_command, error with command %s", excmd) logger.exception("Exception: %s", exp) return None # Parse command command = command.strip() cmd = self.get_command_and_args(command, excmd) if cmd is None: return cmd # If we are a receiver, bail out here... 
do not try to execute the command if self.mode == 'receiver' and not cmd.get('internal', False): return cmd if self.mode == 'applyer' and self.log_external_commands: make_a_log = True # #912: only log an external command if it is not a passive check if self.my_conf.log_passive_checks and cmd['c_name'] \ in ['process_host_check_result', 'process_service_check_result']: # Do not log the command make_a_log = False if make_a_log: # I am a command dispatcher, notifies to my arbiter self.send_an_element(make_monitoring_log('info', 'EXTERNAL COMMAND: ' + command)) if not cmd['global']: # Execute the command c_name = cmd['c_name'] args = cmd['args'] logger.debug("Execute command: %s %s", c_name, str(args)) logger.debug("Command time measurement: %s (%d s)", excmd.creation_timestamp, time.time() - excmd.creation_timestamp) statsmgr.timer('external-commands.latency', time.time() - excmd.creation_timestamp) getattr(self, c_name)(*args) else: # Send command to all our schedulers for scheduler_link in self.my_conf.schedulers: logger.debug("Preparing an external command '%s' for the scheduler %s", excmd, scheduler_link.name) scheduler_link.pushed_commands.append(excmd.cmd_line) return cmd def search_host_and_dispatch(self, host_name, command, extcmd): # pylint: disable=too-many-branches """Try to dispatch a command for a specific host (so specific scheduler) because this command is related to a host (change notification interval for example) :param host_name: host name to search :type host_name: str :param command: command line :type command: str :param extcmd: external command object (the object will be added to sched commands list) :type extcmd: alignak.external_command.ExternalCommand :return: None """ logger.debug("Calling search_host_and_dispatch for %s", host_name) host_found = False # If we are a receiver, just look in the receiver if self.mode == 'receiver': logger.debug("Receiver is searching a scheduler for the external command %s %s", host_name, command) 
scheduler_link = self.daemon.get_scheduler_from_hostname(host_name) if scheduler_link: host_found = True logger.debug("Receiver pushing external command to scheduler %s", scheduler_link.name) scheduler_link.pushed_commands.append(extcmd) else: logger.warning("I did not found a scheduler for the host: %s", host_name) else: for cfg_part in list(self.cfg_parts.values()): if cfg_part.hosts.find_by_name(host_name) is not None: logger.debug("Host %s found in a configuration", host_name) if cfg_part.is_assigned: host_found = True scheduler_link = cfg_part.scheduler_link logger.debug("Sending command to the scheduler %s", scheduler_link.name) scheduler_link.push_external_commands([command]) # scheduler_link.my_daemon.external_commands.append(command) break else: logger.warning("Problem: the host %s was found in a configuration, " "but this configuration is not assigned to any scheduler!", host_name) if not host_found: if self.accept_passive_unknown_check_results: brok = self.get_unknown_check_result_brok(command) if brok: self.send_an_element(brok) else: logger.warning("External command was received for the host '%s', " "but the host could not be found! 
Command is: %s", host_name, command) else: logger.warning("External command was received for host '%s', " "but the host could not be found!", host_name) @staticmethod def get_unknown_check_result_brok(cmd_line): """Create unknown check result brok and fill it with command data :param cmd_line: command line to extract data :type cmd_line: str :return: unknown check result brok :rtype: alignak.objects.brok.Brok """ match = re.match( r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;' r'([^\;]*);([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line) if not match: match = re.match( r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;' r'([^\;]*);([^\;]*);([^\|]*)(?:\|(.*))?', cmd_line) if not match: return None data = { 'time_stamp': int(match.group(1)), 'host_name': match.group(3), } if match.group(2) == 'SERVICE': data['service_description'] = match.group(4) data['return_code'] = match.group(5) data['output'] = match.group(6) data['perf_data'] = match.group(7) else: data['return_code'] = match.group(4) data['output'] = match.group(5) data['perf_data'] = match.group(6) return Brok({'type': 'unknown_%s_check_result' % match.group(2).lower(), 'data': data}) def get_command_and_args(self, command, extcmd=None): # pylint: disable=too-many-return-statements, too-many-nested-blocks # pylint: disable=too-many-locals,too-many-branches,too-many-statements """Parse command and get args :param command: command line to parse :type command: str :param extcmd: external command object (used to dispatch) :type extcmd: None | object :return: Dict containing command and arg :: {'global': False, 'c_name': c_name, 'args': args} :rtype: dict | None """ # danger!!! 
passive check results with perfdata elts = split_semicolon(command) try: timestamp, c_name = elts[0].split() except ValueError as exp: splitted_command = elts[0].split() if len(splitted_command) == 1: # Assume no timestamp and only a command timestamp = "[%s]" % int(time.time()) logger.warning("Missing timestamp in command '%s', using %s as a timestamp.", elts[0], timestamp) c_name = elts[0].split()[0] else: logger.warning("Malformed command '%s'", command) # logger.exception("Malformed command exception: %s", exp) if self.log_external_commands: # The command failed, make a monitoring log to inform self.send_an_element(make_monitoring_log( 'error', "Malformed command: '%s'" % command)) return None c_name = c_name.lower() # Is timestamp already an integer value? try: timestamp = int(timestamp) except ValueError as exp: # Else, remove enclosing characters: [], (), {}, ... timestamp = timestamp[1:-1] # Finally, check that the timestamp is really a timestamp try: self.current_timestamp = int(timestamp) except ValueError as exp: logger.warning("Malformed command '%s'", command) # logger.exception("Malformed command exception: %s", exp) if self.log_external_commands: # The command failed, make a monitoring log to inform self.send_an_element(make_monitoring_log( 'error', "Malformed command: '%s'" % command)) return None if c_name not in ExternalCommandManager.commands: logger.warning("External command '%s' is not recognized, sorry", c_name) if self.log_external_commands: # The command failed, make a monitoring log to inform self.send_an_element(make_monitoring_log( 'error', "Command '%s' is not recognized, sorry" % command)) return None # Split again based on the number of args we expect. We cannot split # on every ; because this character may appear in the perfdata of # passive check results. 
entry = ExternalCommandManager.commands[c_name] # Look if the command is purely internal (Alignak) or not internal = False if 'internal' in entry and entry['internal']: internal = True numargs = len(entry['args']) if numargs and 'service' in entry['args']: numargs += 1 elts = split_semicolon(command, numargs) logger.debug("mode= %s, global= %s", self.mode, str(entry['global'])) if self.mode in ['dispatcher', 'receiver'] and entry['global']: if not internal: logger.debug("Command '%s' is a global one, we resent it to all schedulers", c_name) return {'global': True, 'cmd': command} args = [] i = 1 in_service = False tmp_host = '' obsolete_arg = 0 try: for elt in elts[1:]: try: elt = elt.decode('utf8', 'ignore') except AttributeError: # Python 3 will raise an error... pass except UnicodeEncodeError: pass logger.debug("Searching for a new arg: %s (%d)", elt, i) val = elt.strip() if val.endswith('\n'): val = val[:-1] logger.debug("For command arg: %s", val) if not in_service: type_searched = entry['args'][i - 1] logger.debug("Type searched: %s", type_searched) if type_searched == 'host': if self.mode == 'dispatcher' or self.mode == 'receiver': self.search_host_and_dispatch(val, command, extcmd) return None host = self.hosts.find_by_name(val) if host is None: if self.accept_passive_unknown_check_results: brok = self.get_unknown_check_result_brok(command) if brok: self.daemon.add_brok(brok) else: logger.warning("A command was received for the host '%s', " "but the host could not be found!", val) return None args.append(host) elif type_searched == 'contact': contact = self.contacts.find_by_name(val) if contact is not None: args.append(contact) elif type_searched == 'time_period': timeperiod = self.timeperiods.find_by_name(val) if timeperiod is not None: args.append(timeperiod) elif type_searched == 'obsolete': obsolete_arg += 1 elif type_searched == 'to_bool': args.append(to_bool(val)) elif type_searched == 'to_int': args.append(to_int(val)) elif type_searched in 
('author', None): args.append(val) elif type_searched == 'command': command = self.commands.find_by_name(val) if command is not None: # the find will be redone by # the commandCall creation, but != None # is useful so a bad command will be caught args.append(val) elif type_searched == 'host_group': hostgroup = self.hostgroups.find_by_name(val) if hostgroup is not None: args.append(hostgroup) elif type_searched == 'service_group': servicegroup = self.servicegroups.find_by_name(val) if servicegroup is not None: args.append(servicegroup) elif type_searched == 'contact_group': contactgroup = self.contactgroups.find_by_name(val) if contactgroup is not None: args.append(contactgroup) # special case: service are TWO args host;service, so one more loop # to get the two parts elif type_searched == 'service': in_service = True tmp_host = elt.strip() if tmp_host[-1] == '\n': tmp_host = tmp_host[:-1] if self.mode == 'dispatcher': self.search_host_and_dispatch(tmp_host, command, extcmd) return None i += 1 else: in_service = False srv_name = elt if srv_name[-1] == '\n': srv_name = srv_name[:-1] # If we are in a receiver, bailout now. 
if self.mode == 'receiver': self.search_host_and_dispatch(tmp_host, command, extcmd) return None serv = self.services.find_srv_by_name_and_hostname(tmp_host, srv_name) if serv is None: if self.accept_passive_unknown_check_results: brok = self.get_unknown_check_result_brok(command) self.send_an_element(brok) else: logger.warning("A command was received for the service '%s' on " "host '%s', but the service could not be found!", srv_name, tmp_host) return None args.append(serv) logger.debug("Got args: %s", args) except IndexError as exp: logger.warning("Sorry, the arguments for the command '%s' are not correct") logger.exception("Arguments parsing exception: %s", exp) if self.log_external_commands: # The command failed, make a monitoring log to inform self.send_an_element(make_monitoring_log( 'error', "Arguments are not correct for the command: '%s'" % command)) else: if len(args) == (len(entry['args']) - obsolete_arg): return { 'global': False, 'internal': internal, 'c_name': c_name, 'args': args } logger.warning("Sorry, the arguments for the command '%s' are not correct (%s)", command, (args)) if self.log_external_commands: # The command failed, make a monitoring log to inform self.send_an_element(make_monitoring_log( 'error', "Arguments are not correct for the command: '%s'" % command)) return None @staticmethod def change_contact_modsattr(contact, value): """Change contact modified service attribute value Format of the line that triggers function call:: CHANGE_CONTACT_MODSATTR;<contact_name>;<value> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param value: new value to set :type value: str :return: None """ # todo: deprecate this contact.modified_service_attributes = int(value) @staticmethod def change_contact_modhattr(contact, value): """Change contact modified host attribute value Format of the line that triggers function call:: CHANGE_CONTACT_MODHATTR;<contact_name>;<value> :param contact: contact to edit :type contact: 
alignak.objects.contact.Contact :param value: new value to set :type value:str :return: None """ # todo: deprecate this contact.modified_host_attributes = int(value) @staticmethod def change_contact_modattr(contact, value): """Change contact modified attribute value Format of the line that triggers function call:: CHANGE_CONTACT_MODATTR;<contact_name>;<value> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param value: new value to set :type value: str :return: None """ # todo: deprecate this contact.modified_attributes = int(value) def change_contact_host_notification_timeperiod(self, contact, notification_timeperiod): """Change contact host notification timeperiod value Format of the line that triggers function call:: CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param notification_timeperiod: timeperiod to set :type notification_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None """ # todo: deprecate this contact.modified_host_attributes |= DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value contact.host_notification_period = notification_timeperiod self.send_an_element(contact.get_update_status_brok()) def add_svc_comment(self, service, author, comment): """Add a service comment Format of the line that triggers function call:: ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment> :param service: service to add the comment :type service: alignak.objects.service.Service :param author: author name :type author: str :param comment: text comment :type comment: str :return: None """ data = { 'author': author, 'comment': comment, 'comment_type': 2, 'entry_type': 1, 'source': 1, 'expires': False, 'ref': service.uuid } comm = Comment(data) service.add_comment(comm) self.send_an_element(service.get_update_status_brok()) try: brok = make_monitoring_log('info', "SERVICE 
COMMENT: %s;%s;%s;%s" % (self.hosts[service.host].get_name(), service.get_name(), str(author, 'utf-8'), str(comment, 'utf-8'))) except TypeError: brok = make_monitoring_log('info', "SERVICE COMMENT: %s;%s;%s;%s" % (self.hosts[service.host].get_name(), service.get_name(), author, comment)) self.send_an_element(brok) self.send_an_element(comm.get_comment_brok( self.hosts[service.host].get_name(), service.get_name())) def add_host_comment(self, host, author, comment): """Add a host comment Format of the line that triggers function call:: ADD_HOST_COMMENT;<host_name>;<persistent:obsolete>;<author>;<comment> :param host: host to add the comment :type host: alignak.objects.host.Host :param author: author name :type author: str :param comment: text comment :type comment: str :return: None """ data = { 'author': author, 'comment': comment, 'comment_type': 1, 'entry_type': 1, 'source': 1, 'expires': False, 'ref': host.uuid } comm = Comment(data) host.add_comment(comm) self.send_an_element(host.get_update_status_brok()) try: brok = make_monitoring_log('info', "HOST COMMENT: %s;%s;%s" % (host.get_name(), str(author, 'utf-8'), str(comment, 'utf-8'))) except TypeError: brok = make_monitoring_log('info', "HOST COMMENT: %s;%s;%s" % (host.get_name(), author, comment)) self.send_an_element(brok) self.send_an_element(comm.get_comment_brok(self.hosts[host].get_name())) def acknowledge_svc_problem(self, service, sticky, notify, author, comment): """Acknowledge a service problem Format of the line that triggers function call:: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;<sticky>;<notify>; <persistent:obsolete>;<author>;<comment> :param service: service to acknowledge the problem :type service: alignak.objects.service.Service :param sticky: if sticky == 2, the acknowledge will remain until the service returns to an OK state else the acknowledge will be removed as soon as the service state changes :param notify: if to 1, send a notification :type notify: integer :param 
author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None """ notification_period = None if getattr(service, 'notification_period', None) is not None: notification_period = self.daemon.timeperiods[service.notification_period] service.acknowledge_problem(notification_period, self.hosts, self.services, sticky, notify, author, comment) def acknowledge_host_problem(self, host, sticky, notify, author, comment): """Acknowledge a host problem Format of the line that triggers function call:: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent:obsolete>;<author>; <comment> :param host: host to acknowledge the problem :type host: alignak.objects.host.Host :param sticky: if sticky == 2, the acknowledge will remain until the host returns to an UP state else the acknowledge will be removed as soon as the host state changes :type sticky: integer :param notify: if to 1, send a notification :type notify: integer :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None TODO: add a better ACK management """ notification_period = None if getattr(host, 'notification_period', None) is not None: notification_period = self.daemon.timeperiods[host.notification_period] host.acknowledge_problem(notification_period, self.hosts, self.services, sticky, notify, author, comment) def acknowledge_svc_problem_expire(self, service, sticky, notify, end_time, author, comment): """Acknowledge a service problem with expire time for this acknowledgement Format of the line that triggers function call:: ACKNOWLEDGE_SVC_PROBLEM_EXPIRE;<host_name>;<service_description>;<sticky>;<notify>; <persistent:obsolete>;<end_time>;<author>;<comment> :param service: service to acknowledge the problem :type service: alignak.objects.service.Service :param sticky: acknowledge will be always present is host 
return in UP state :type sticky: integer :param notify: if to 1, send a notification :type notify: integer :param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end) :type end_time: int :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None """ notification_period = None if getattr(service, 'notification_period', None) is not None: notification_period = self.daemon.timeperiods[service.notification_period] service.acknowledge_problem(notification_period, self.hosts, self.services, sticky, notify, author, comment, end_time=end_time) def acknowledge_host_problem_expire(self, host, sticky, notify, end_time, author, comment): """Acknowledge a host problem with expire time for this acknowledgement Format of the line that triggers function call:: ACKNOWLEDGE_HOST_PROBLEM_EXPIRE;<host_name>;<sticky>;<notify>;<persistent:obsolete>; <end_time>;<author>;<comment> :param host: host to acknowledge the problem :type host: alignak.objects.host.Host :param sticky: acknowledge will be always present is host return in UP state :type sticky: integer :param notify: if to 1, send a notification :type notify: integer :param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end) :type end_time: int :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None TODO: add a better ACK management """ notification_period = None if getattr(host, 'notification_period', None) is not None: notification_period = self.daemon.timeperiods[host.notification_period] host.acknowledge_problem(notification_period, self.hosts, self.services, sticky, notify, author, comment, end_time=end_time) def change_contact_svc_notification_timeperiod(self, contact, notification_timeperiod): """Change contact service notification timeperiod value Format of the 
line that triggers function call:: CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param notification_timeperiod: timeperiod to set :type notification_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None """ contact.modified_service_attributes |= \ DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value contact.service_notification_period = notification_timeperiod self.send_an_element(contact.get_update_status_brok()) def change_custom_contact_var(self, contact, varname, varvalue): """Change custom contact variable Format of the line that triggers function call:: CHANGE_CUSTOM_CONTACT_VAR;<contact_name>;<varname>;<varvalue> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param varname: variable name to change :type varname: str :param varvalue: variable new value :type varvalue: str :return: None """ if varname.upper() in contact.customs: contact.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value contact.customs[varname.upper()] = varvalue self.send_an_element(contact.get_update_status_brok()) def change_custom_host_var(self, host, varname, varvalue): """Change custom host variable Format of the line that triggers function call:: CHANGE_CUSTOM_HOST_VAR;<host_name>;<varname>;<varvalue> :param host: host to edit :type host: alignak.objects.host.Host :param varname: variable name to change :type varname: str :param varvalue: variable new value :type varvalue: str :return: None """ if varname.upper() in host.customs: host.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value host.customs[varname.upper()] = varvalue self.send_an_element(host.get_update_status_brok()) def change_custom_svc_var(self, service, varname, varvalue): """Change custom service variable Format of the line that triggers function call:: CHANGE_CUSTOM_SVC_VAR;<host_name>;<service_description>;<varname>;<varvalue> 
:param service: service to edit :type service: alignak.objects.service.Service :param varname: variable name to change :type varvalue: str :param varvalue: variable new value :type varname: str :return: None """ if varname.upper() in service.customs: service.modified_attributes |= DICT_MODATTR["MODATTR_CUSTOM_VARIABLE"].value service.customs[varname.upper()] = varvalue self.send_an_element(service.get_update_status_brok()) def change_global_host_event_handler(self, event_handler_command): """DOES NOTHING (should change global host event handler) Format of the line that triggers function call:: CHANGE_GLOBAL_HOST_EVENT_HANDLER;<event_handler_command> :param event_handler_command: new event handler :type event_handler_command: :return: None TODO: DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value """ logger.warning("The external command 'CHANGE_GLOBAL_HOST_EVENT_HANDLER' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'CHANGE_GLOBAL_HOST_EVENT_HANDLER: this command is not implemented!')) def change_global_svc_event_handler(self, event_handler_command): """DOES NOTHING (should change global service event handler) Format of the line that triggers function call:: CHANGE_GLOBAL_SVC_EVENT_HANDLER;<event_handler_command> :param event_handler_command: new event handler :type event_handler_command: :return: None TODO: DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value """ logger.warning("The external command 'CHANGE_GLOBAL_SVC_EVENT_HANDLER' " "is not currently implemented in Alignak. 
If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'CHANGE_GLOBAL_SVC_EVENT_HANDLER: this command is not implemented!')) def change_host_check_command(self, host, check_command): """Modify host check command Format of the line that triggers function call:: CHANGE_HOST_CHECK_COMMAND;<host_name>;<check_command> :param host: host to modify check command :type host: alignak.objects.host.Host :param check_command: command line :type check_command: :return: None """ host.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_COMMAND"].value host.change_check_command(check_command, self.commands) self.send_an_element(host.get_update_status_brok()) def change_host_check_timeperiod(self, host, timeperiod): """Modify host check timeperiod Format of the line that triggers function call:: CHANGE_HOST_CHECK_TIMEPERIOD;<host_name>;<timeperiod> :param host: host to modify check timeperiod :type host: alignak.objects.host.Host :param timeperiod: timeperiod object :type timeperiod: alignak.objects.timeperiod.Timeperiod :return: None """ host.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_TIMEPERIOD"].value host.check_period = timeperiod self.send_an_element(host.get_update_status_brok()) def change_host_event_handler(self, host, event_handler_command): """Modify host event handler Format of the line that triggers function call:: CHANGE_HOST_EVENT_HANDLER;<host_name>;<event_handler_command> :param host: host to modify event handler :type host: alignak.objects.host.Host :param event_handler_command: event handler command line :type event_handler_command: :return: None """ host.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value host.change_event_handler(event_handler_command, self.commands) self.send_an_element(host.get_update_status_brok()) def change_host_snapshot_command(self, host, snapshot_command): """Modify host 
snapshot command Format of the line that triggers function call:: CHANGE_HOST_SNAPSHOT_COMMAND;<host_name>;<event_handler_command> :param host: host to modify snapshot command :type host: alignak.objects.host.Host :param snapshot_command: snapshot command command line :type snapshot_command: :return: None """ host.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value host.change_snapshot_command(snapshot_command, self.commands) self.send_an_element(host.get_update_status_brok()) def change_host_modattr(self, host, value): """Change host modified attributes Format of the line that triggers function call:: CHANGE_HOST_MODATTR;<host_name>;<value> For boolean attributes, toggles the service attribute state (enable/disable) For non boolean attribute, only indicates that the corresponding attribute is to be saved in the retention. Value can be: MODATTR_NONE 0 MODATTR_NOTIFICATIONS_ENABLED 1 MODATTR_ACTIVE_CHECKS_ENABLED 2 MODATTR_PASSIVE_CHECKS_ENABLED 4 MODATTR_EVENT_HANDLER_ENABLED 8 MODATTR_FLAP_DETECTION_ENABLED 16 MODATTR_PERFORMANCE_DATA_ENABLED 64 MODATTR_EVENT_HANDLER_COMMAND 256 MODATTR_CHECK_COMMAND 512 MODATTR_NORMAL_CHECK_INTERVAL 1024 MODATTR_RETRY_CHECK_INTERVAL 2048 MODATTR_MAX_CHECK_ATTEMPTS 4096 MODATTR_FRESHNESS_CHECKS_ENABLED 8192 MODATTR_CHECK_TIMEPERIOD 16384 MODATTR_CUSTOM_VARIABLE 32768 MODATTR_NOTIFICATION_TIMEPERIOD 65536 :param host: host to edit :type host: alignak.objects.host.Host :param value: new value to set :type value: str :return: None """ # todo: deprecate this # We need to change each of the needed attributes. 
previous_value = host.modified_attributes changes = int(value) # For all boolean and non boolean attributes for modattr in ["MODATTR_NOTIFICATIONS_ENABLED", "MODATTR_ACTIVE_CHECKS_ENABLED", "MODATTR_PASSIVE_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_ENABLED", "MODATTR_FLAP_DETECTION_ENABLED", "MODATTR_PERFORMANCE_DATA_ENABLED", "MODATTR_FRESHNESS_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_COMMAND", "MODATTR_CHECK_COMMAND", "MODATTR_NORMAL_CHECK_INTERVAL", "MODATTR_RETRY_CHECK_INTERVAL", "MODATTR_MAX_CHECK_ATTEMPTS", "MODATTR_FRESHNESS_CHECKS_ENABLED", "MODATTR_CHECK_TIMEPERIOD", "MODATTR_CUSTOM_VARIABLE", "MODATTR_NOTIFICATION_TIMEPERIOD"]: if changes & DICT_MODATTR[modattr].value: # Toggle the concerned service attribute setattr(host, DICT_MODATTR[modattr].attribute, not getattr(host, DICT_MODATTR[modattr].attribute)) host.modified_attributes = previous_value ^ changes # And we need to push the information to the scheduler. self.send_an_element(host.get_update_status_brok()) def change_max_host_check_attempts(self, host, check_attempts): """Modify max host check attempt Format of the line that triggers function call:: CHANGE_MAX_HOST_CHECK_ATTEMPTS;<host_name>;<check_attempts> :param host: host to edit :type host: alignak.objects.host.Host :param check_attempts: new value to set :type check_attempts: int :return: None """ host.modified_attributes |= DICT_MODATTR["MODATTR_MAX_CHECK_ATTEMPTS"].value host.max_check_attempts = check_attempts if host.state_type == u'HARD' and host.state == u'UP' and host.attempt > 1: host.attempt = host.max_check_attempts self.send_an_element(host.get_update_status_brok()) def change_max_svc_check_attempts(self, service, check_attempts): """Modify max service check attempt Format of the line that triggers function call:: CHANGE_MAX_SVC_CHECK_ATTEMPTS;<host_name>;<service_description>;<check_attempts> :param service: service to edit :type service: alignak.objects.service.Service :param check_attempts: new value to set :type check_attempts: 
        int
        :return: None
        """
        service.modified_attributes |= DICT_MODATTR["MODATTR_MAX_CHECK_ATTEMPTS"].value
        service.max_check_attempts = check_attempts
        # If a retry sequence is in progress on a HARD OK service, realign the
        # attempt counter on the new maximum
        if service.state_type == u'HARD' and service.state == u'OK' and service.attempt > 1:
            service.attempt = service.max_check_attempts
        self.send_an_element(service.get_update_status_brok())

    def change_normal_host_check_interval(self, host, check_interval):
        """Modify host check interval

        Format of the line that triggers function call::

        CHANGE_NORMAL_HOST_CHECK_INTERVAL;<host_name>;<check_interval>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :param check_interval: new value to set
        :type check_interval: int
        :return: None
        """
        host.modified_attributes |= DICT_MODATTR["MODATTR_NORMAL_CHECK_INTERVAL"].value
        old_interval = host.check_interval
        host.check_interval = check_interval
        # If there were no regular checks (interval=0), then schedule
        # a check immediately.
        if old_interval == 0 and host.checks_enabled:
            host.schedule(self.daemon.hosts, self.daemon.services,
                          self.daemon.timeperiods, self.daemon.macromodulations,
                          self.daemon.checkmodulations, self.daemon.checks,
                          force=False, force_time=int(time.time()))
        self.send_an_element(host.get_update_status_brok())

    def change_normal_svc_check_interval(self, service, check_interval):
        """Modify service check interval

        Format of the line that triggers function call::

        CHANGE_NORMAL_SVC_CHECK_INTERVAL;<host_name>;<service_description>;<check_interval>

        :param service: service to edit
        :type service: alignak.objects.service.Service
        :param check_interval: new value to set
        :type check_interval: int
        :return: None
        """
        service.modified_attributes |= DICT_MODATTR["MODATTR_NORMAL_CHECK_INTERVAL"].value
        old_interval = service.check_interval
        service.check_interval = check_interval
        # If there were no regular checks (interval=0), then schedule
        # a check immediately.
        if old_interval == 0 and service.checks_enabled:
            service.schedule(self.daemon.hosts, self.daemon.services,
                             self.daemon.timeperiods, self.daemon.macromodulations,
                             self.daemon.checkmodulations, self.daemon.checks,
                             force=False, force_time=int(time.time()))
        self.send_an_element(service.get_update_status_brok())

    def change_retry_host_check_interval(self, host, check_interval):
        """Modify host retry interval

        Format of the line that triggers function call::

        CHANGE_RETRY_HOST_CHECK_INTERVAL;<host_name>;<check_interval>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :param check_interval: new value to set
        :type check_interval: int
        :return: None
        """
        host.modified_attributes |= DICT_MODATTR["MODATTR_RETRY_CHECK_INTERVAL"].value
        host.retry_interval = check_interval
        self.send_an_element(host.get_update_status_brok())

    def change_retry_svc_check_interval(self, service, check_interval):
        """Modify service retry interval

        Format of the line that triggers function call::

        CHANGE_RETRY_SVC_CHECK_INTERVAL;<host_name>;<service_description>;<check_interval>

        :param service: service to edit
        :type service: alignak.objects.service.Service
        :param check_interval: new value to set
        :type check_interval: int
        :return: None
        """
        service.modified_attributes |= DICT_MODATTR["MODATTR_RETRY_CHECK_INTERVAL"].value
        service.retry_interval = check_interval
        self.send_an_element(service.get_update_status_brok())

    def change_svc_check_command(self, service, check_command):
        """Modify service check command

        Format of the line that triggers function call::

        CHANGE_SVC_CHECK_COMMAND;<host_name>;<service_description>;<check_command>

        :param service: service to modify check command
        :type service: alignak.objects.service.Service
        :param check_command: command line
        :type check_command: str
        :return: None
        """
        service.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_COMMAND"].value
        service.change_check_command(check_command, self.commands)
        self.send_an_element(service.get_update_status_brok())

    def change_svc_check_timeperiod(self, service, check_timeperiod):
        """Modify service check timeperiod

        Format of the line that triggers function call::

        CHANGE_SVC_CHECK_TIMEPERIOD;<host_name>;<service_description>;<check_timeperiod>

        :param service: service to modify check timeperiod
        :type service: alignak.objects.service.Service
        :param check_timeperiod: timeperiod object
        :type check_timeperiod: alignak.objects.timeperiod.Timeperiod
        :return: None
        """
        service.modified_attributes |= DICT_MODATTR["MODATTR_CHECK_TIMEPERIOD"].value
        service.check_period = check_timeperiod
        self.send_an_element(service.get_update_status_brok())

    def change_svc_event_handler(self, service, event_handler_command):
        """Modify service event handler

        Format of the line that triggers function call::

        CHANGE_SVC_EVENT_HANDLER;<host_name>;<service_description>;<event_handler_command>

        :param service: service to modify event handler
        :type service: alignak.objects.service.Service
        :param event_handler_command: event handler command line
        :type event_handler_command: str
        :return: None
        """
        service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value
        service.change_event_handler(event_handler_command, self.commands)
        self.send_an_element(service.get_update_status_brok())

    def change_svc_snapshot_command(self, service, snapshot_command):
        """Modify service snapshot command

        Format of the line that triggers function call::

        CHANGE_SVC_SNAPSHOT_COMMAND;<host_name>;<event_handler_command>

        :param service: service to modify snapshot command
        :type service: alignak.objects.service.Service
        :param snapshot_command: snapshot command command line
        :type snapshot_command: str
        :return: None
        """
        # NOTE(review): no dedicated MODATTR exists for the snapshot command, the
        # event handler command flag is reused here
        service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value
        service.change_snapshot_command(snapshot_command, self.commands)
        self.send_an_element(service.get_update_status_brok())

    def change_svc_modattr(self, service, value):
        """Change service modified attributes

        Format of the line that triggers function call::
CHANGE_SVC_MODATTR;<host_name>;<service_description>;<value> For boolean attributes, toggles the service attribute state (enable/disable) For non boolean attribute, only indicates that the corresponding attribute is to be saved in the retention. Value can be: MODATTR_NONE 0 MODATTR_NOTIFICATIONS_ENABLED 1 MODATTR_ACTIVE_CHECKS_ENABLED 2 MODATTR_PASSIVE_CHECKS_ENABLED 4 MODATTR_EVENT_HANDLER_ENABLED 8 MODATTR_FLAP_DETECTION_ENABLED 16 MODATTR_PERFORMANCE_DATA_ENABLED 64 MODATTR_EVENT_HANDLER_COMMAND 256 MODATTR_CHECK_COMMAND 512 MODATTR_NORMAL_CHECK_INTERVAL 1024 MODATTR_RETRY_CHECK_INTERVAL 2048 MODATTR_MAX_CHECK_ATTEMPTS 4096 MODATTR_FRESHNESS_CHECKS_ENABLED 8192 MODATTR_CHECK_TIMEPERIOD 16384 MODATTR_CUSTOM_VARIABLE 32768 MODATTR_NOTIFICATION_TIMEPERIOD 65536 :param service: service to edit :type service: alignak.objects.service.Service :param value: new value to set / unset :type value: str :return: None """ # todo: deprecate this # We need to change each of the needed attributes. previous_value = service.modified_attributes changes = int(value) # For all boolean and non boolean attributes for modattr in ["MODATTR_NOTIFICATIONS_ENABLED", "MODATTR_ACTIVE_CHECKS_ENABLED", "MODATTR_PASSIVE_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_ENABLED", "MODATTR_FLAP_DETECTION_ENABLED", "MODATTR_PERFORMANCE_DATA_ENABLED", "MODATTR_FRESHNESS_CHECKS_ENABLED", "MODATTR_EVENT_HANDLER_COMMAND", "MODATTR_CHECK_COMMAND", "MODATTR_NORMAL_CHECK_INTERVAL", "MODATTR_RETRY_CHECK_INTERVAL", "MODATTR_MAX_CHECK_ATTEMPTS", "MODATTR_FRESHNESS_CHECKS_ENABLED", "MODATTR_CHECK_TIMEPERIOD", "MODATTR_CUSTOM_VARIABLE", "MODATTR_NOTIFICATION_TIMEPERIOD"]: if changes & DICT_MODATTR[modattr].value: # Toggle the concerned service attribute setattr(service, DICT_MODATTR[modattr].attribute, not getattr(service, DICT_MODATTR[modattr].attribute)) service.modified_attributes = previous_value ^ changes # And we need to push the information to the scheduler. 
self.send_an_element(service.get_update_status_brok()) def change_svc_notification_timeperiod(self, service, notification_timeperiod): """Change service notification timeperiod Format of the line that triggers function call:: CHANGE_SVC_NOTIFICATION_TIMEPERIOD;<host_name>;<service_description>; <notification_timeperiod> :param service: service to edit :type service: alignak.objects.service.Service :param notification_timeperiod: timeperiod to set :type notification_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None """ service.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value service.notification_period = notification_timeperiod self.send_an_element(service.get_update_status_brok()) def delay_host_notification(self, host, notification_time): """Modify host first notification delay Format of the line that triggers function call:: DELAY_HOST_NOTIFICATION;<host_name>;<notification_time> :param host: host to edit :type host: alignak.objects.host.Host :param notification_time: new value to set :type notification_time: :return: None """ host.first_notification_delay = notification_time self.send_an_element(host.get_update_status_brok()) def delay_svc_notification(self, service, notification_time): """Modify service first notification delay Format of the line that triggers function call:: DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time> :param service: service to edit :type service: alignak.objects.service.Service :param notification_time: new value to set :type notification_time: :return: None """ service.first_notification_delay = notification_time self.send_an_element(service.get_update_status_brok()) def del_all_contact_downtimes(self, contact): """Delete all contact downtimes Format of the line that triggers function call:: DEL_ALL_CONTACT_DOWNTIMES;<contact_name> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :return: None """ for downtime in contact.downtimes: 
            self.del_contact_downtime(downtime)

    def del_all_host_comments(self, host):
        """Delete all host comments

        Format of the line that triggers function call::

        DEL_ALL_HOST_COMMENTS;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        # Iterate over a copy of the keys because del_comment mutates the dict
        comments = list(host.comments.keys())
        for uuid in comments:
            host.del_comment(uuid)
        self.send_an_element(host.get_update_status_brok())

    def del_all_host_downtimes(self, host):
        """Delete all host downtimes

        Format of the line that triggers function call::

        DEL_ALL_HOST_DOWNTIMES;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        for downtime in host.downtimes:
            self.del_host_downtime(downtime)
        self.send_an_element(host.get_update_status_brok())

    def del_all_svc_comments(self, service):
        """Delete all service comments

        Format of the line that triggers function call::

        DEL_ALL_SVC_COMMENTS;<host_name>;<service_description>

        :param service: service to edit
        :type service: alignak.objects.service.Service
        :return: None
        """
        # Iterate over a copy of the keys because del_comment mutates the dict
        comments = list(service.comments.keys())
        for uuid in comments:
            service.del_comment(uuid)
        self.send_an_element(service.get_update_status_brok())

    def del_all_svc_downtimes(self, service):
        """Delete all service downtime

        Format of the line that triggers function call::

        DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description>

        :param service: service to edit
        :type service: alignak.objects.service.Service
        :return: None
        """
        for downtime in service.downtimes:
            self.del_svc_downtime(downtime)
        self.send_an_element(service.get_update_status_brok())

    def del_contact_downtime(self, downtime_id):
        """Delete a contact downtime

        Format of the line that triggers function call::

        DEL_CONTACT_DOWNTIME;<downtime_id>

        :param downtime_id: downtime id to delete
        :type downtime_id: int
        :return: None
        """
        for item in self.daemon.contacts:
            if downtime_id in item.downtimes:
                item.downtimes[downtime_id].cancel(self.daemon.contacts)
                break
        # for/else: only reached when no contact owns this downtime id
        else:
            self.send_an_element(make_monitoring_log(
                'warning',
                'DEL_CONTACT_DOWNTIME: downtime id: %s does not exist '
                'and cannot be deleted.' % downtime_id))

    def del_host_comment(self, comment_id):
        """Delete a host comment

        Format of the line that triggers function call::

        DEL_HOST_COMMENT;<comment_id>

        :param comment_id: comment id to delete
        :type comment_id: int
        :return: None
        """
        for item in self.daemon.hosts:
            if comment_id in item.comments:
                item.del_comment(comment_id)
                self.send_an_element(item.get_update_status_brok())
                break
        # for/else: only reached when no host owns this comment id
        else:
            self.send_an_element(make_monitoring_log(
                'warning',
                'DEL_HOST_COMMENT: comment id: %s does not exist '
                'and cannot be deleted.' % comment_id))

    def del_host_downtime(self, downtime_id):
        """Delete a host downtime

        Format of the line that triggers function call::

        DEL_HOST_DOWNTIME;<downtime_id>

        :param downtime_id: downtime id to delete
        :type downtime_id: int
        :return: None
        """
        broks = []
        for item in self.daemon.hosts:
            if downtime_id in item.downtimes:
                # Cancelling a downtime may raise notification broks
                broks.extend(item.downtimes[downtime_id].cancel(self.daemon.timeperiods,
                                                                self.daemon.hosts,
                                                                self.daemon.services))
                break
        # for/else: only reached when no host owns this downtime id
        else:
            self.send_an_element(make_monitoring_log(
                'warning',
                'DEL_HOST_DOWNTIME: downtime id: %s does not exist '
                'and cannot be deleted.' % downtime_id))
        for brok in broks:
            self.send_an_element(brok)

    def del_svc_comment(self, comment_id):
        """Delete a service comment

        Format of the line that triggers function call::

        DEL_SVC_COMMENT;<comment_id>

        :param comment_id: comment id to delete
        :type comment_id: int
        :return: None
        """
        for svc in self.daemon.services:
            if comment_id in svc.comments:
                svc.del_comment(comment_id)
                self.send_an_element(svc.get_update_status_brok())
                break
        # for/else: only reached when no service owns this comment id
        else:
            self.send_an_element(make_monitoring_log(
                'warning',
                'DEL_SVC_COMMENT: comment id: %s does not exist '
                'and cannot be deleted.' % comment_id))

    def del_svc_downtime(self, downtime_id):
        """Delete a service downtime

        Format of the line that triggers function call::

        DEL_SVC_DOWNTIME;<downtime_id>

        :param downtime_id: downtime id to delete
        :type downtime_id: int
        :return: None
        """
        broks = []
        for svc in self.daemon.services:
            if downtime_id in svc.downtimes:
                # Cancelling a downtime may raise notification broks
                broks.extend(svc.downtimes[downtime_id].cancel(self.daemon.timeperiods,
                                                               self.daemon.hosts,
                                                               self.daemon.services))
                break
        # for/else: only reached when no service owns this downtime id
        else:
            self.send_an_element(make_monitoring_log(
                'warning',
                'DEL_SVC_DOWNTIME: downtime id: %s does not exist '
                'and cannot be deleted.' % downtime_id))
        for brok in broks:
            self.send_an_element(brok)

    def disable_all_notifications_beyond_host(self, host):
        """DOES NOTHING (should disable notification beyond a host)

        Format of the line that triggers function call::

        DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        TODO: Implement it
        """
        logger.warning("The external command 'DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST' "
                       "is not currently implemented in Alignak. If you really need it, "
                       "request for its implementation in the project repository: "
                       "https://github.com/Alignak-monitoring/alignak")
        self.send_an_element(make_monitoring_log(
            'warning',
            'DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST: this command is not implemented!'))

    def disable_contactgroup_host_notifications(self, contactgroup):
        """Disable host notifications for a contactgroup

        Format of the line that triggers function call::

        DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name>

        :param contactgroup: contactgroup to disable
        :type contactgroup: alignak.objects.contactgroup.Contactgroup
        :return: None
        """
        for contact_id in contactgroup.get_contacts():
            self.disable_contact_host_notifications(self.daemon.contacts[contact_id])

    def disable_contactgroup_svc_notifications(self, contactgroup):
        """Disable service notifications for a contactgroup

        Format of the line that triggers function call::

        DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>

        :param contactgroup: contactgroup to disable
        :type contactgroup: alignak.objects.contactgroup.Contactgroup
        :return: None
        """
        for contact_id in contactgroup.get_contacts():
            self.disable_contact_svc_notifications(self.daemon.contacts[contact_id])

    def disable_contact_host_notifications(self, contact):
        """Disable host notifications for a contact

        Format of the line that triggers function call::

        DISABLE_CONTACT_HOST_NOTIFICATIONS;<contact_name>

        :param contact: contact to disable
        :type contact: alignak.objects.contact.Contact
        :return: None
        """
        if contact.host_notifications_enabled:
            contact.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
            contact.host_notifications_enabled = False
            self.send_an_element(contact.get_update_status_brok())

    def disable_contact_svc_notifications(self, contact):
        """Disable service notifications for a contact

        Format of the line that triggers function call::

        DISABLE_CONTACT_SVC_NOTIFICATIONS;<contact_name>

        :param contact: contact to disable
        :type contact:
        alignak.objects.contact.Contact
        :return: None
        """
        if contact.service_notifications_enabled:
            contact.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
            contact.service_notifications_enabled = False
            self.send_an_element(contact.get_update_status_brok())

    def disable_event_handlers(self):
        """Disable event handlers (globally)

        Format of the line that triggers function call::

        DISABLE_EVENT_HANDLERS

        :return: None
        """
        # todo: #783 create a dedicated brok for global parameters
        if self.my_conf.enable_event_handlers:
            self.my_conf.modified_attributes |= \
                DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
            self.my_conf.enable_event_handlers = False
            self.my_conf.explode_global_conf()
            self.daemon.update_program_status()

    def disable_flap_detection(self):
        """Disable flap detection (globally)

        Format of the line that triggers function call::

        DISABLE_FLAP_DETECTION

        :return: None
        """
        # todo: #783 create a dedicated brok for global parameters
        if self.my_conf.enable_flap_detection:
            self.my_conf.modified_attributes |= \
                DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
            self.my_conf.enable_flap_detection = False
            self.my_conf.explode_global_conf()
            self.daemon.update_program_status()

            # Is need, disable flap state for hosts and services
            for service in self.my_conf.services:
                if service.is_flapping:
                    service.is_flapping = False
                    service.flapping_changes = []
                    self.send_an_element(service.get_update_status_brok())

            for host in self.my_conf.hosts:
                if host.is_flapping:
                    host.is_flapping = False
                    host.flapping_changes = []
                    self.send_an_element(host.get_update_status_brok())

    def disable_hostgroup_host_checks(self, hostgroup):
        """Disable host checks for a hostgroup

        Format of the line that triggers function call::

        DISABLE_HOSTGROUP_HOST_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to disable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                self.disable_host_check(self.daemon.hosts[host_id])

    def disable_hostgroup_host_notifications(self, hostgroup):
        """Disable host notifications for a hostgroup

        Format of the line that triggers function call::

        DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>

        :param hostgroup: hostgroup to disable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                self.disable_host_notifications(self.daemon.hosts[host_id])

    def disable_hostgroup_passive_host_checks(self, hostgroup):
        """Disable host passive checks for a hostgroup

        Format of the line that triggers function call::

        DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to disable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                self.disable_passive_host_checks(self.daemon.hosts[host_id])

    def disable_hostgroup_passive_svc_checks(self, hostgroup):
        """Disable service passive checks for a hostgroup

        Format of the line that triggers function call::

        DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to disable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        # Apply to every known service of every known host of the group
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                for service_id in self.daemon.hosts[host_id].services:
                    if service_id in self.daemon.services:
                        self.disable_passive_svc_checks(self.daemon.services[service_id])

    def disable_hostgroup_svc_checks(self, hostgroup):
        """Disable service checks for a hostgroup

        Format of the line that triggers function call::

        DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to disable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        # Apply to every known service of every known host of the group
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                for service_id in self.daemon.hosts[host_id].services:
                    if service_id in self.daemon.services:
                        self.disable_svc_check(self.daemon.services[service_id])

    def disable_hostgroup_svc_notifications(self, hostgroup):
        """Disable service notifications for a hostgroup

        Format of the line that triggers function call::

        DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>

        :param hostgroup: hostgroup to disable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        # Apply to every known service of every known host of the group
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                for service_id in self.daemon.hosts[host_id].services:
                    if service_id in self.daemon.services:
                        self.disable_svc_notifications(self.daemon.services[service_id])

    def disable_host_and_child_notifications(self, host):
        """DOES NOTHING (Should disable host notifications and its child)

        Format of the line that triggers function call::

        DISABLE_HOST_AND_CHILD_NOTIFICATIONS;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        logger.warning("The external command 'DISABLE_HOST_AND_CHILD_NOTIFICATIONS' "
                       "is not currently implemented in Alignak. If you really need it, "
                       "request for its implementation in the project repository: "
                       "https://github.com/Alignak-monitoring/alignak")
        self.send_an_element(make_monitoring_log(
            'warning',
            'DISABLE_HOST_AND_CHILD_NOTIFICATIONS: this command is not implemented!'))

    def disable_host_check(self, host):
        """Disable checks for a host

        Format of the line that triggers function call::

        DISABLE_HOST_CHECK;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if host.active_checks_enabled:
            host.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
            host.disable_active_checks(self.daemon.checks)
            self.send_an_element(host.get_update_status_brok())

    def disable_host_event_handler(self, host):
        """Disable event handlers for a host

        Format of the line that triggers function call::

        DISABLE_HOST_EVENT_HANDLER;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if host.event_handler_enabled:
            host.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
            host.event_handler_enabled = False
            self.send_an_element(host.get_update_status_brok())

    def disable_host_flap_detection(self, host):
        """Disable flap detection for a host

        Format of the line that triggers function call::

        DISABLE_HOST_FLAP_DETECTION;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if host.flap_detection_enabled:
            host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
            host.flap_detection_enabled = False
            # Maybe the host was flapping, if so, stop flapping
            if host.is_flapping:
                host.is_flapping = False
                host.flapping_changes = []
            self.send_an_element(host.get_update_status_brok())

    def disable_host_freshness_check(self, host):
        """Disable freshness check for a host

        Format of the line that triggers function call::

        DISABLE_HOST_FRESHNESS_CHECK;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if host.check_freshness:
            host.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
            host.check_freshness = False
            self.send_an_element(host.get_update_status_brok())

    def disable_host_freshness_checks(self):
        """Disable freshness checks (globally)

        Format of the line that triggers function call::

        DISABLE_HOST_FRESHNESS_CHECKS

        :return: None
        """
        if self.my_conf.check_host_freshness:
            self.my_conf.modified_attributes |= \
                DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
            self.my_conf.check_host_freshness = False
            self.my_conf.explode_global_conf()
            self.daemon.update_program_status()

    def disable_host_notifications(self, host):
        """Disable notifications for a host

        Format of the line that triggers function call::

        DISABLE_HOST_NOTIFICATIONS;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if host.notifications_enabled:
            host.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
            host.notifications_enabled = False
            self.send_an_element(host.get_update_status_brok())

    def disable_host_svc_checks(self, host):
        """Disable service checks for a host

        Format of the line that triggers function call::

        DISABLE_HOST_SVC_CHECKS;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        for service_id in host.services:
            if service_id in self.daemon.services:
                service = self.daemon.services[service_id]
                self.disable_svc_check(service)
                self.send_an_element(service.get_update_status_brok())

    def disable_host_svc_notifications(self, host):
        """Disable services notifications for a host

        Format of the line that triggers function call::

        DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        for service_id in host.services:
            if service_id in self.daemon.services:
                service = self.daemon.services[service_id]
                self.disable_svc_notifications(service)
                self.send_an_element(service.get_update_status_brok())

    def
disable_notifications(self): """Disable notifications (globally) Format of the line that triggers function call:: DISABLE_NOTIFICATIONS :return: None """ # todo: #783 create a dedicated brok for global parameters if self.my_conf.enable_notifications: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value self.my_conf.enable_notifications = False self.my_conf.explode_global_conf() self.daemon.update_program_status() def disable_passive_host_checks(self, host): """Disable passive checks for a host Format of the line that triggers function call:: DISABLE_PASSIVE_HOST_CHECKS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ if host.passive_checks_enabled: host.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value host.passive_checks_enabled = False self.send_an_element(host.get_update_status_brok()) def disable_passive_svc_checks(self, service): """Disable passive checks for a service Format of the line that triggers function call:: DISABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if service.passive_checks_enabled: service.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value service.passive_checks_enabled = False self.send_an_element(service.get_update_status_brok()) def disable_performance_data(self): """Disable performance data processing (globally) Format of the line that triggers function call:: DISABLE_PERFORMANCE_DATA :return: None """ # todo: #783 create a dedicated brok for global parameters if self.my_conf.process_performance_data: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value self.my_conf.process_performance_data = False self.my_conf.explode_global_conf() self.daemon.update_program_status() def disable_servicegroup_host_checks(self, servicegroup): """Disable host checks for a 
servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): if service_id in self.daemon.services: host_id = self.daemon.services[service_id].host self.disable_host_check(self.daemon.hosts[host_id]) def disable_servicegroup_host_notifications(self, servicegroup): """Disable host notifications for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): if service_id in self.daemon.services: host_id = self.daemon.services[service_id].host self.disable_host_notifications(self.daemon.hosts[host_id]) def disable_servicegroup_passive_host_checks(self, servicegroup): """Disable passive host checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): if service_id in self.daemon.services: host_id = self.daemon.services[service_id].host self.disable_passive_host_checks(self.daemon.hosts[host_id]) def disable_servicegroup_passive_svc_checks(self, servicegroup): """Disable passive service checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): self.disable_passive_svc_checks(self.daemon.services[service_id]) def 
disable_servicegroup_svc_checks(self, servicegroup): """Disable service checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): self.disable_svc_check(self.daemon.services[service_id]) def disable_servicegroup_svc_notifications(self, servicegroup): """Disable service notifications for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): self.disable_svc_notifications(self.daemon.services[service_id]) def disable_service_flap_detection(self, service): """Disable flap detection for a service Format of the line that triggers function call:: DISABLE_SERVICE_FLAP_DETECTION;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if service.flap_detection_enabled: service.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value service.flap_detection_enabled = False # Maybe the service was flapping, if so, stop flapping if service.is_flapping: service.is_flapping = False service.flapping_changes = [] self.send_an_element(service.get_update_status_brok()) def disable_svc_freshness_check(self, service): """Disable freshness check for a service Format of the line that triggers function call:: DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if service.check_freshness: service.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value service.check_freshness = False 
self.send_an_element(service.get_update_status_brok()) def disable_service_freshness_checks(self): """Disable service freshness checks (globally) Format of the line that triggers function call:: DISABLE_SERVICE_FRESHNESS_CHECKS :return: None """ if self.my_conf.check_service_freshness: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value self.my_conf.check_service_freshness = False self.my_conf.explode_global_conf() self.daemon.update_program_status() def disable_svc_check(self, service): """Disable checks for a service Format of the line that triggers function call:: DISABLE_SVC_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if service.active_checks_enabled: service.disable_active_checks(self.daemon.checks) service.modified_attributes |= \ DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value self.send_an_element(service.get_update_status_brok()) def disable_svc_event_handler(self, service): """Disable event handlers for a service Format of the line that triggers function call:: DISABLE_SVC_EVENT_HANDLER;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if service.event_handler_enabled: service.modified_attributes |= \ DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value service.event_handler_enabled = False self.send_an_element(service.get_update_status_brok()) def disable_svc_flap_detection(self, service): """Disable flap detection for a service Format of the line that triggers function call:: DISABLE_SVC_FLAP_DETECTION;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ self.disable_service_flap_detection(service) def disable_svc_notifications(self, service): """Disable notifications for a service Format of the line that triggers function call:: 
        DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>

        :param service: service to edit
        :type service: alignak.objects.service.Service
        :return: None
        """
        if service.notifications_enabled:
            service.modified_attributes |= \
                DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
            service.notifications_enabled = False
            self.send_an_element(service.get_update_status_brok())

    def enable_all_notifications_beyond_host(self, host):
        """DOES NOTHING (should enable notification beyond a host)

        Format of the line that triggers function call::

        ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        TODO: Implement it
        """
        logger.warning("The external command 'ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST' "
                       "is not currently implemented in Alignak. If you really need it, "
                       "request for its implementation in the project repository: "
                       "https://github.com/Alignak-monitoring/alignak")
        self.send_an_element(make_monitoring_log(
            'warning', 'ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST: this command is not implemented!'))

    def enable_contactgroup_host_notifications(self, contactgroup):
        """Enable host notifications for a contactgroup

        Format of the line that triggers function call::

        ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name>

        :param contactgroup: contactgroup to enable
        :type contactgroup: alignak.objects.contactgroup.Contactgroup
        :return: None
        """
        for contact_id in contactgroup.get_contacts():
            self.enable_contact_host_notifications(self.daemon.contacts[contact_id])

    def enable_contactgroup_svc_notifications(self, contactgroup):
        """Enable service notifications for a contactgroup

        Format of the line that triggers function call::

        ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>

        :param contactgroup: contactgroup to enable
        :type contactgroup: alignak.objects.contactgroup.Contactgroup
        :return: None
        """
        for contact_id in contactgroup.get_contacts():
            self.enable_contact_svc_notifications(self.daemon.contacts[contact_id])

    def enable_contact_host_notifications(self, contact):
        """Enable host notifications for a contact

        Format of the line that triggers function call::

        ENABLE_CONTACT_HOST_NOTIFICATIONS;<contact_name>

        :param contact: contact to enable
        :type contact: alignak.objects.contact.Contact
        :return: None
        """
        if not contact.host_notifications_enabled:
            contact.modified_attributes |= \
                DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
            contact.host_notifications_enabled = True
            self.send_an_element(contact.get_update_status_brok())

    def enable_contact_svc_notifications(self, contact):
        """Enable service notifications for a contact

        Format of the line that triggers function call::

        ENABLE_CONTACT_SVC_NOTIFICATIONS;<contact_name>

        :param contact: contact to enable
        :type contact: alignak.objects.contact.Contact
        :return: None
        """
        if not contact.service_notifications_enabled:
            contact.modified_attributes |= \
                DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
            contact.service_notifications_enabled = True
            self.send_an_element(contact.get_update_status_brok())

    def enable_event_handlers(self):
        """Enable event handlers (globally)

        Format of the line that triggers function call::

        ENABLE_EVENT_HANDLERS

        :return: None
        """
        # todo: #783 create a dedicated brok for global parameters
        if not self.my_conf.enable_event_handlers:
            self.my_conf.modified_attributes |= \
                DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
            self.my_conf.enable_event_handlers = True
            self.my_conf.explode_global_conf()
            self.daemon.update_program_status()

    def enable_flap_detection(self):
        """Enable flap detection (globally)

        Format of the line that triggers function call::

        ENABLE_FLAP_DETECTION

        :return: None
        """
        # todo: #783 create a dedicated brok for global parameters
        if not self.my_conf.enable_flap_detection:
            self.my_conf.modified_attributes |= \
                DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
            self.my_conf.enable_flap_detection = True
            self.my_conf.explode_global_conf()
            self.daemon.update_program_status()

    def enable_hostgroup_host_checks(self, hostgroup):
        """Enable host checks for a hostgroup

        Format of the line that triggers function call::

        ENABLE_HOSTGROUP_HOST_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to enable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                self.enable_host_check(self.daemon.hosts[host_id])

    def enable_hostgroup_host_notifications(self, hostgroup):
        """Enable host notifications for a hostgroup

        Format of the line that triggers function call::

        ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>

        :param hostgroup: hostgroup to enable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                self.enable_host_notifications(self.daemon.hosts[host_id])

    def enable_hostgroup_passive_host_checks(self, hostgroup):
        """Enable host passive checks for a hostgroup

        Format of the line that triggers function call::

        ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to enable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                self.enable_passive_host_checks(self.daemon.hosts[host_id])

    def enable_hostgroup_passive_svc_checks(self, hostgroup):
        """Enable service passive checks for a hostgroup

        Format of the line that triggers function call::

        ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to enable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                for service_id in self.daemon.hosts[host_id].services:
                    if service_id in self.daemon.services:
                        self.enable_passive_svc_checks(self.daemon.services[service_id])

    def enable_hostgroup_svc_checks(self, hostgroup):
        """Enable service checks for a hostgroup

        Format of the line that triggers function call::
        ENABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name>

        :param hostgroup: hostgroup to enable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                for service_id in self.daemon.hosts[host_id].services:
                    if service_id in self.daemon.services:
                        self.enable_svc_check(self.daemon.services[service_id])

    def enable_hostgroup_svc_notifications(self, hostgroup):
        """Enable service notifications for a hostgroup

        Format of the line that triggers function call::

        ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>

        :param hostgroup: hostgroup to enable
        :type hostgroup: alignak.objects.hostgroup.Hostgroup
        :return: None
        """
        for host_id in hostgroup.get_hosts():
            if host_id in self.daemon.hosts:
                for service_id in self.daemon.hosts[host_id].services:
                    if service_id in self.daemon.services:
                        self.enable_svc_notifications(self.daemon.services[service_id])

    def enable_host_and_child_notifications(self, host):
        """DOES NOTHING (Should enable host notifications and its child)

        Format of the line that triggers function call::

        ENABLE_HOST_AND_CHILD_NOTIFICATIONS;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        logger.warning("The external command 'ENABLE_HOST_AND_CHILD_NOTIFICATIONS' "
                       "is not currently implemented in Alignak. If you really need it, "
                       "request for its implementation in the project repository: "
                       "https://github.com/Alignak-monitoring/alignak")
        self.send_an_element(make_monitoring_log(
            'warning', 'ENABLE_HOST_AND_CHILD_NOTIFICATIONS: this command is not implemented!'))

    def enable_host_check(self, host):
        """Enable checks for a host

        Format of the line that triggers function call::

        ENABLE_HOST_CHECK;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if not host.active_checks_enabled:
            host.active_checks_enabled = True
            host.modified_attributes |= \
                DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
            self.send_an_element(host.get_update_status_brok())

    def enable_host_event_handler(self, host):
        """Enable event handlers for a host

        Format of the line that triggers function call::

        ENABLE_HOST_EVENT_HANDLER;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if not host.event_handler_enabled:
            host.modified_attributes |= \
                DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
            host.event_handler_enabled = True
            self.send_an_element(host.get_update_status_brok())

    def enable_host_flap_detection(self, host):
        """Enable flap detection for a host

        Format of the line that triggers function call::

        ENABLE_HOST_FLAP_DETECTION;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if not host.flap_detection_enabled:
            host.modified_attributes |= \
                DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
            host.flap_detection_enabled = True
            self.send_an_element(host.get_update_status_brok())

    def enable_host_freshness_check(self, host):
        """Enable freshness check for a host

        Format of the line that triggers function call::

        ENABLE_HOST_FRESHNESS_CHECK;<host_name>

        :param host: host to edit
        :type host: alignak.objects.host.Host
        :return: None
        """
        if not host.check_freshness:
            host.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
            host.check_freshness = True
self.send_an_element(host.get_update_status_brok()) def enable_host_freshness_checks(self): """Enable freshness checks (globally) Format of the line that triggers function call:: ENABLE_HOST_FRESHNESS_CHECKS :return: None """ if not self.my_conf.check_host_freshness: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value self.my_conf.check_host_freshness = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def enable_host_notifications(self, host): """Enable notifications for a host Format of the line that triggers function call:: ENABLE_HOST_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ if not host.notifications_enabled: host.modified_attributes |= \ DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value host.notifications_enabled = True self.send_an_element(host.get_update_status_brok()) def enable_host_svc_checks(self, host): """Enable service checks for a host Format of the line that triggers function call:: ENABLE_HOST_SVC_CHECKS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ for service_id in host.services: if service_id in self.daemon.services: service = self.daemon.services[service_id] self.enable_svc_check(service) self.send_an_element(service.get_update_status_brok()) def enable_host_svc_notifications(self, host): """Enable services notifications for a host Format of the line that triggers function call:: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ for service_id in host.services: if service_id in self.daemon.services: service = self.daemon.services[service_id] self.enable_svc_notifications(service) self.send_an_element(service.get_update_status_brok()) def enable_notifications(self): """Enable notifications (globally) Format of the line that triggers function call:: ENABLE_NOTIFICATIONS :return: None """ # 
todo: #783 create a dedicated brok for global parameters if not self.my_conf.enable_notifications: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value self.my_conf.enable_notifications = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def enable_passive_host_checks(self, host): """Enable passive checks for a host Format of the line that triggers function call:: ENABLE_PASSIVE_HOST_CHECKS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ if not host.passive_checks_enabled: host.modified_attributes |= \ DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value host.passive_checks_enabled = True self.send_an_element(host.get_update_status_brok()) def enable_passive_svc_checks(self, service): """Enable passive checks for a service Format of the line that triggers function call:: ENABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if not service.passive_checks_enabled: service.modified_attributes |= \ DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value service.passive_checks_enabled = True self.send_an_element(service.get_update_status_brok()) def enable_performance_data(self): """Enable performance data processing (globally) Format of the line that triggers function call:: ENABLE_PERFORMANCE_DATA :return: None """ if not self.my_conf.process_performance_data: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value self.my_conf.process_performance_data = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def enable_servicegroup_host_checks(self, servicegroup): """Enable host checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: 
alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): if service_id in self.daemon.services: host_id = self.daemon.services[service_id].host self.enable_host_check(self.daemon.hosts[host_id]) def enable_servicegroup_host_notifications(self, servicegroup): """Enable host notifications for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): if service_id in self.daemon.services: host_id = self.daemon.services[service_id].host self.enable_host_notifications(self.daemon.hosts[host_id]) def enable_servicegroup_passive_host_checks(self, servicegroup): """Enable passive host checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): if service_id in self.daemon.services: host_id = self.daemon.services[service_id].host self.enable_passive_host_checks(self.daemon.hosts[host_id]) def enable_servicegroup_passive_svc_checks(self, servicegroup): """Enable passive service checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): self.enable_passive_svc_checks(self.daemon.services[service_id]) def enable_servicegroup_svc_checks(self, servicegroup): """Enable service checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_SVC_CHECKS;<servicegroup_name> :param 
servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): self.enable_svc_check(self.daemon.services[service_id]) def enable_servicegroup_svc_notifications(self, servicegroup): """Enable service notifications for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None """ for service_id in servicegroup.get_services(): self.enable_svc_notifications(self.daemon.services[service_id]) def enable_service_freshness_checks(self): """Enable service freshness checks (globally) Format of the line that triggers function call:: ENABLE_SERVICE_FRESHNESS_CHECKS :return: None """ if not self.my_conf.check_service_freshness: self.my_conf.modified_attributes |= \ DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value self.my_conf.check_service_freshness = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def enable_svc_check(self, service): """Enable checks for a service Format of the line that triggers function call:: ENABLE_SVC_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if not service.active_checks_enabled: service.modified_attributes |= \ DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value service.active_checks_enabled = True self.send_an_element(service.get_update_status_brok()) def enable_svc_event_handler(self, service): """Enable event handlers for a service Format of the line that triggers function call:: ENABLE_SVC_EVENT_HANDLER;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if not service.event_handler_enabled: service.modified_attributes |= \ 
DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value service.event_handler_enabled = True self.send_an_element(service.get_update_status_brok()) def enable_svc_freshness_check(self, service): """Enable freshness check for a service Format of the line that triggers function call:: ENABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if not service.check_freshness: service.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value service.check_freshness = True self.send_an_element(service.get_update_status_brok()) def enable_svc_flap_detection(self, service): """Enable flap detection for a service Format of the line that triggers function call:: ENABLE_SVC_FLAP_DETECTION;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if not service.flap_detection_enabled: service.modified_attributes |= \ DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value service.flap_detection_enabled = True self.send_an_element(service.get_update_status_brok()) def enable_svc_notifications(self, service): """Enable notifications for a service Format of the line that triggers function call:: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ if not service.notifications_enabled: service.modified_attributes |= \ DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value service.notifications_enabled = True self.send_an_element(service.get_update_status_brok()) def process_file(self, file_name, delete): """DOES NOTHING (should process a file) Format of the line that triggers function call:: PROCESS_FILE;<file_name>;<delete> :param file_name: file to process :type file_name: str :param delete: delete after processing :type delete: :return: None """ logger.warning("The external command 
'PROCESS_FILE' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'PROCESS_FILE: this command is not implemented!')) def process_host_check_result(self, host, status_code, plugin_output): """Process host check result Format of the line that triggers function call:: PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output> :param host: host to process check to :type host: alignak.objects.host.Host :param status_code: exit code of plugin :type status_code: int :param plugin_output: plugin output :type plugin_output: str :return: None TODO: say that check is PASSIVE """ now = time.time() cls = host.__class__ # If globally disabled OR host disabled, do not launch.. if not cls.accept_passive_checks or not host.passive_checks_enabled: return try: plugin_output = plugin_output.decode('utf8', 'ignore') logger.debug('%s > Passive host check plugin output: %s', host.get_full_name(), plugin_output) except AttributeError: # Python 3 will raise an exception pass except UnicodeError: pass # Maybe the check is just too old, if so, bail out! if self.current_timestamp < host.last_chk: logger.debug('%s > Passive host check is too old (%.2f seconds). 
' 'Ignoring, check output: %s', host.get_full_name(), self.current_timestamp < host.last_chk, plugin_output) return chk = host.launch_check(now, self.hosts, self.services, self.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True) # We will not have a check if an host/service is checked but it has no defined check_command if not chk: return # Now we 'transform the check into a result' # So exit_status, output and status is eaten by the host chk.exit_status = status_code chk.get_outputs(plugin_output, host.max_plugins_output_length) chk.status = ACT_STATUS_WAIT_CONSUME chk.check_time = self.current_timestamp # we are using the external command timestamps # Set the corresponding host's check type to passive chk.set_type_passive() # self.daemon.nb_check_received += 1 self.send_an_element(chk) # Ok now this result will be read by the scheduler the next loop # raise a passive check log only if needed if self.my_conf.log_passive_checks: log_level = 'info' if status_code == 1: # DOWN log_level = 'error' if status_code == 2: # UNREACHABLE log_level = 'warning' self.send_an_element(make_monitoring_log( log_level, 'PASSIVE HOST CHECK: %s;%d;%s;%s;%s' % ( host.get_name(), status_code, chk.output, chk.long_output, chk.perf_data))) def process_host_output(self, host, plugin_output): """Process host output Format of the line that triggers function call:: PROCESS_HOST_OUTPUT;<host_name>;<plugin_output> :param host: host to process check to :type host: alignak.objects.host.Host :param plugin_output: plugin output :type plugin_output: str :return: None """ self.process_host_check_result(host, host.state_id, plugin_output) def process_service_check_result(self, service, return_code, plugin_output): """Process service check result Format of the line that triggers function call:: PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output> :param service: service to process check to :type service: 
alignak.objects.service.Service :param return_code: exit code of plugin :type return_code: int :param plugin_output: plugin output :type plugin_output: str :return: None """ now = time.time() cls = service.__class__ # If globally disabled OR service disabled, do not launch.. if not cls.accept_passive_checks or not service.passive_checks_enabled: return try: plugin_output = plugin_output.decode('utf8', 'ignore') logger.debug('%s > Passive service check plugin output: %s', service.get_full_name(), plugin_output) except AttributeError: # Python 3 will raise an exception pass except UnicodeError: pass # Maybe the check is just too old, if so, bail out! if self.current_timestamp < service.last_chk: logger.debug('%s > Passive service check is too old (%d seconds). ' 'Ignoring, check output: %s', service.get_full_name(), self.current_timestamp < service.last_chk, plugin_output) return # Create a check object from the external command chk = service.launch_check(now, self.hosts, self.services, self.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True) # Should not be possible to not find the check, but if so, don't crash if not chk: logger.error('%s > Passive service check failed. 
None check launched !?', service.get_full_name()) return # Now we 'transform the check into a result' # So exit_status, output and status is eaten by the service chk.exit_status = return_code chk.get_outputs(plugin_output, service.max_plugins_output_length) logger.debug('%s > Passive service check output: %s', service.get_full_name(), chk.output) chk.status = ACT_STATUS_WAIT_CONSUME chk.check_time = self.current_timestamp # we are using the external command timestamps # Set the corresponding service's check type to passive chk.set_type_passive() # self.daemon.nb_check_received += 1 self.send_an_element(chk) # Ok now this result will be read by the scheduler the next loop # raise a passive check log only if needed if self.my_conf.log_passive_checks: log_level = 'info' if return_code == 1: # WARNING log_level = 'warning' if return_code == 2: # CRITICAL log_level = 'error' self.send_an_element(make_monitoring_log( log_level, 'PASSIVE SERVICE CHECK: %s;%s;%d;%s;%s;%s' % ( self.hosts[service.host].get_name(), service.get_name(), return_code, chk.output, chk.long_output, chk.perf_data))) def process_service_output(self, service, plugin_output): """Process service output Format of the line that triggers function call:: PROCESS_SERVICE_OUTPUT;<host_name>;<service_description>;<plugin_output> :param service: service to process check to :type service: alignak.objects.service.Service :param plugin_output: plugin output :type plugin_output: str :return: None """ self.process_service_check_result(service, service.state_id, plugin_output) def read_state_information(self): """Request to load the live state from the retention storage Format of the line that triggers function call:: READ_STATE_INFORMATION :return: None """ logger.warning("The external command 'READ_STATE_INFORMATION' " "is not currently implemented in Alignak. 
If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'READ_STATE_INFORMATION: this command is not implemented!')) @staticmethod def remove_host_acknowledgement(host): """Remove an acknowledgment on a host Format of the line that triggers function call:: REMOVE_HOST_ACKNOWLEDGEMENT;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ host.unacknowledge_problem() @staticmethod def remove_svc_acknowledgement(service): """Remove an acknowledgment on a service Format of the line that triggers function call:: REMOVE_SVC_ACKNOWLEDGEMENT;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None """ service.unacknowledge_problem() def restart_program(self): """Restart Alignak Format of the line that triggers function call:: RESTART_PROGRAM :return: None """ restart_cmd = self.commands.find_by_name('restart-alignak') if not restart_cmd: logger.error("Cannot restart Alignak : missing command named" " 'restart-alignak'. 
Please add one") return restart_cmd_line = restart_cmd.command_line logger.warning("RESTART command : %s", restart_cmd_line) # Ok get an event handler command that will run in 15min max e_handler = EventHandler({'command': restart_cmd_line, 'timeout': 900}) # Ok now run it e_handler.execute() # And wait for the command to finish while e_handler.status not in [ACT_STATUS_DONE, ACT_STATUS_TIMEOUT]: e_handler.check_finished(64000) log_level = 'info' if e_handler.status == ACT_STATUS_TIMEOUT or e_handler.exit_status != 0: logger.error("Cannot restart Alignak : the 'restart-alignak' command failed with" " the error code '%d' and the text '%s'.", e_handler.exit_status, e_handler.output) log_level = 'error' # Ok here the command succeed, we can now wait our death self.send_an_element(make_monitoring_log(log_level, "RESTART: %s" % (e_handler.output))) def reload_config(self): """Reload Alignak configuration Format of the line that triggers function call:: RELOAD_CONFIG :return: None """ reload_cmd = self.commands.find_by_name('reload-alignak') if not reload_cmd: logger.error("Cannot restart Alignak : missing command" " named 'reload-alignak'. 
Please add one") return logger.warning("RELOAD command : %s", reload_cmd) reload_cmd_line = reload_cmd.command_line logger.warning("RELOAD command : %s", reload_cmd_line) # Ok get an event handler command that will run in 15min max e_handler = EventHandler({'command': reload_cmd_line, 'timeout': 900}) # Ok now run it e_handler.execute() # And wait for the command to finish while e_handler.status not in [ACT_STATUS_DONE, ACT_STATUS_TIMEOUT]: e_handler.check_finished(64000) log_level = 'info' if e_handler.status == ACT_STATUS_TIMEOUT or e_handler.exit_status != 0: logger.error("Cannot reload Alignak configuration: the 'reload-alignak' command failed" " with the error code '%d' and the text '%s'.", e_handler.exit_status, e_handler.output) log_level = 'error' # Ok here the command succeed, we can now wait our death self.send_an_element(make_monitoring_log(log_level, "RELOAD: %s" % (e_handler.output))) def save_state_information(self): """Request to save the live state to the retention Format of the line that triggers function call:: SAVE_STATE_INFORMATION :return: None """ logger.warning("The external command 'SAVE_STATE_INFORMATION' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SAVE_STATE_INFORMATION: this command is not implemented!')) def schedule_and_propagate_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): """DOES NOTHING (Should create host downtime and start it?) Format of the line that triggers function call:: SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :return: None """ logger.warning("The external command 'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME' " "is not currently implemented in Alignak. 
If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME: this command is not implemented!')) def schedule_and_propagate_triggered_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): """DOES NOTHING (Should create triggered host downtime and start it?) Format of the line that triggers function call:: SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :return: None """ logger.warning("The external command 'SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME: ' 'this command is not implemented!')) def schedule_contact_downtime(self, contact, start_time, end_time, author, comment): """Schedule contact downtime Format of the line that triggers function call:: SCHEDULE_CONTACT_DOWNTIME;<contact_name>;<start_time>;<end_time>;<author>;<comment> :param contact: contact to put in downtime :type contact: alignak.objects.contact.Contact :param start_time: downtime start time :type start_time: int :param end_time: downtime end time :type end_time: int :param author: downtime author :type author: str :param comment: text comment :type comment: str :return: None """ data = {'ref': contact.uuid, 'start_time': start_time, 'end_time': end_time, 'author': author, 'comment': comment} cdt = ContactDowntime(data) contact.add_downtime(cdt) self.send_an_element(contact.get_update_status_brok()) def schedule_forced_host_check(self, host, check_time): """Schedule a forced check on a host Format of the line that triggers function 
call:: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: int :return: None """ host.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True, force_time=check_time) self.send_an_element(host.get_update_status_brok()) def schedule_forced_host_svc_checks(self, host, check_time): """Schedule a forced check on all services of a host Format of the line that triggers function call:: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: int :return: None """ for service_id in host.services: service = self.daemon.services[service_id] self.schedule_forced_svc_check(service, check_time) self.send_an_element(service.get_update_status_brok()) def schedule_forced_svc_check(self, service, check_time): """Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None """ service.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True, force_time=check_time) self.send_an_element(service.get_update_status_brok()) def schedule_hostgroup_host_downtime(self, hostgroup, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a downtime for each host of a hostgroup Format of the line that triggers function call:: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param hostgroup: hostgroup to schedule 
:type hostgroup: alignak.objects.hostgroup.Hostgroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None """ for host_id in hostgroup.get_hosts(): if host_id in self.daemon.hosts: host = self.daemon.hosts[host_id] self.schedule_host_downtime(host, start_time, end_time, fixed, trigger_id, duration, author, comment) def schedule_hostgroup_svc_downtime(self, hostgroup, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a downtime for each service of each host of a hostgroup Format of the line that triggers function call:: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;;<hostgroup_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :param hostgroup: hostgroup to schedule :type hostgroup: alignak.objects.hostgroup.Hostgroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None """ for host_id in hostgroup.get_hosts(): if host_id in self.daemon.hosts: host = self.daemon.hosts[host_id] for service_id in host.services: service = self.daemon.services[service_id] self.schedule_svc_downtime(service, start_time, end_time, fixed, trigger_id, duration, author, comment) def schedule_host_check(self, host, check_time): """Schedule a check on a host Format of the line that triggers function call:: SCHEDULE_HOST_CHECK;<host_name>;<check_time> :param host: 
host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None """ host.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=False, force_time=check_time) self.send_an_element(host.get_update_status_brok()) def schedule_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a host downtime Format of the line that triggers function call:: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :param host: host to schedule downtime :type host: alignak.object.host.Host :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None """ data = {'ref': host.uuid, 'ref_type': host.my_type, 'start_time': start_time, 'end_time': end_time, 'fixed': fixed, 'trigger_id': trigger_id, 'duration': duration, 'author': author, 'comment': comment} downtime = Downtime(data) downtime.add_automatic_comment(host) host.add_downtime(downtime) self.send_an_element(host.get_update_status_brok()) if trigger_id not in ('', 0): for item in self.daemon.hosts: if trigger_id in item.downtimes: host.downtimes[trigger_id].trigger_me(downtime.uuid) def schedule_host_svc_checks(self, host, check_time): """Schedule a check on all services of a host Format of the line that triggers function call:: SCHEDULE_HOST_SVC_CHECKS;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None """ for service_id in 
host.services: service = self.daemon.services[service_id] self.schedule_svc_check(service, check_time) self.send_an_element(service.get_update_status_brok()) def schedule_host_svc_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a service downtime for each service of an host Format of the line that triggers function call:: SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param host: host to schedule downtime :type host: alignak.object.host.Host :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None """ for serv in host.services: self.schedule_svc_downtime(serv, start_time, end_time, fixed, trigger_id, duration, author, comment) def schedule_servicegroup_host_downtime(self, servicegroup, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a host downtime for each host of services in a servicegroup Format of the line that triggers function call:: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :param servicegroup: servicegroup to schedule downtime :type servicegroup: alignak.object.servicegroup.Servicegroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str 
:return: None """ for host in [s.host for s in servicegroup.get_services()]: self.schedule_host_downtime(host, start_time, end_time, fixed, trigger_id, duration, author, comment) def schedule_servicegroup_svc_downtime(self, servicegroup, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a service downtime for each service of a servicegroup Format of the line that triggers function call:: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param servicegroup: servicegroup to schedule downtime :type servicegroup: alignak.object.servicegroup.Servicegroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None """ for serv in servicegroup.get_services(): self.schedule_svc_downtime(serv, start_time, end_time, fixed, trigger_id, duration, author, comment) def schedule_svc_check(self, service, check_time): """Schedule a check on a service Format of the line that triggers function call:: SCHEDULE_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: :return: None """ service.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=False, force_time=check_time) self.send_an_element(service.get_update_status_brok()) def schedule_svc_downtime(self, service, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a service downtime Format of the line that triggers 
function call:: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description><start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param service: service to check :type service: alignak.object.service.Service :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: int :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None """ data = {'ref': service.uuid, 'ref_type': service.my_type, 'start_time': start_time, 'end_time': end_time, 'fixed': fixed, 'trigger_id': trigger_id, 'duration': duration, 'author': author, 'comment': comment} downtime = Downtime(data) downtime.add_automatic_comment(service) service.add_downtime(downtime) self.send_an_element(service.get_update_status_brok()) if trigger_id not in ('', 0): for item in self.daemon.services: if trigger_id in item.downtimes: service.downtimes[trigger_id].trigger_me(downtime.uuid) def send_custom_host_notification(self, host, options, author, comment): """DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment> :param host: host to send notif for :type host: alignak.object.host.Host :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None """ logger.warning("The external command 'SEND_CUSTOM_HOST_NOTIFICATION' " "is not currently implemented in Alignak. 
If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SEND_CUSTOM_HOST_NOTIFICATION: this command is not implemented!')) def send_custom_svc_notification(self, service, options, author, comment): """DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: SEND_CUSTOM_SVC_NOTIFICATION;<host_name>;<service_description>;<options>;<author>;<comment>> :param service: service to send notif for :type service: alignak.object.service.Service :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None """ logger.warning("The external command 'SEND_CUSTOM_SVC_NOTIFICATION' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SEND_CUSTOM_SVC_NOTIFICATION: this command is not implemented!')) def set_host_notification_number(self, host, notification_number): """DOES NOTHING (Should set host notification number) Format of the line that triggers function call:: SET_HOST_NOTIFICATION_NUMBER;<host_name>;<notification_number> :param host: host to edit :type host: alignak.object.host.Host :param notification_number: new value to set :type notification_number: :return: None """ logger.warning("The external command 'SET_HOST_NOTIFICATION_NUMBER' " "is not currently implemented in Alignak. 
If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SET_HOST_NOTIFICATION_NUMBER: this command is not implemented!')) def set_svc_notification_number(self, service, notification_number): """DOES NOTHING (Should set host notification number) Format of the line that triggers function call:: SET_SVC_NOTIFICATION_NUMBER;<host_name>;<service_description>;<notification_number> :param service: service to edit :type service: alignak.object.service.Service :param notification_number: new value to set :type notification_number: :return: None """ logger.warning("The external command 'SET_SVC_NOTIFICATION_NUMBER' " "is not currently implemented in Alignak. If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SET_SVC_NOTIFICATION_NUMBER: this command is not implemented!')) def shutdown_program(self): """DOES NOTHING (Should shutdown Alignak) Format of the line that triggers function call:: SHUTDOWN_PROGRAM :return: None """ logger.warning("The external command 'SHUTDOWN_PROGRAM' " "is not currently implemented in Alignak. 
If you really need it, " "request for its implementation in the project repository: " "https://github.com/Alignak-monitoring/alignak") self.send_an_element(make_monitoring_log( 'warning', 'SHUTDOWN_PROGRAM: this command is not implemented!')) def start_accepting_passive_host_checks(self): """Enable passive host check submission (globally) Format of the line that triggers function call:: START_ACCEPTING_PASSIVE_HOST_CHECKS :return: None """ # todo: #783 create a dedicated brok for global parameters if not self.my_conf.accept_passive_host_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value self.my_conf.accept_passive_host_checks = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def start_accepting_passive_svc_checks(self): """Enable passive service check submission (globally) Format of the line that triggers function call:: START_ACCEPTING_PASSIVE_SVC_CHECKS :return: None """ # todo: #783 create a dedicated brok for global parameters if not self.my_conf.accept_passive_service_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value self.my_conf.accept_passive_service_checks = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def start_executing_host_checks(self): """Enable host check execution (globally) Format of the line that triggers function call:: START_EXECUTING_HOST_CHECKS :return: None """ # todo: #783 create a dedicated brok for global parameters if not self.my_conf.execute_host_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value self.my_conf.execute_host_checks = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def start_executing_svc_checks(self): """Enable service check execution (globally) Format of the line that triggers function call:: START_EXECUTING_SVC_CHECKS :return: None """ # todo: #783 create a dedicated brok for global parameters if not 
self.my_conf.execute_service_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value self.my_conf.execute_service_checks = True self.my_conf.explode_global_conf() self.daemon.update_program_status() def stop_accepting_passive_host_checks(self): """Disable passive host check submission (globally) Format of the line that triggers function call:: STOP_ACCEPTING_PASSIVE_HOST_CHECKS :return: None """ if self.my_conf.accept_passive_host_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value self.my_conf.accept_passive_host_checks = False self.my_conf.explode_global_conf() self.daemon.update_program_status() def stop_accepting_passive_svc_checks(self): """Disable passive service check submission (globally) Format of the line that triggers function call:: STOP_ACCEPTING_PASSIVE_SVC_CHECKS :return: None """ if self.my_conf.accept_passive_service_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value self.my_conf.accept_passive_service_checks = False self.my_conf.explode_global_conf() self.daemon.update_program_status() def stop_executing_host_checks(self): """Disable host check execution (globally) Format of the line that triggers function call:: STOP_EXECUTING_HOST_CHECKS :return: None """ if self.my_conf.execute_host_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value self.my_conf.execute_host_checks = False self.my_conf.explode_global_conf() self.daemon.update_program_status() def stop_executing_svc_checks(self): """Disable service check execution (globally) Format of the line that triggers function call:: STOP_EXECUTING_SVC_CHECKS :return: None """ if self.my_conf.execute_service_checks: self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value self.my_conf.execute_service_checks = False self.my_conf.explode_global_conf() self.daemon.update_program_status() def 
launch_svc_event_handler(self, service): """Launch event handler for a service Format of the line that triggers function call:: LAUNCH_SVC_EVENT_HANDLER;<host_name>;<service_description> :param service: service to execute the event handler :type service: alignak.objects.service.Service :return: None """ service.get_event_handlers(self.hosts, self.daemon.macromodulations, self.daemon.timeperiods, ext_cmd=True) def launch_host_event_handler(self, host): """Launch event handler for a service Format of the line that triggers function call:: LAUNCH_HOST_EVENT_HANDLER;<host_name> :param host: host to execute the event handler :type host: alignak.objects.host.Host :return: None """ host.get_event_handlers(self.hosts, self.daemon.macromodulations, self.daemon.timeperiods, ext_cmd=True)
class ExternalCommandManager(object): '''ExternalCommandManager manages all external commands sent to Alignak. It basically parses arguments and executes the right function ''' def __init__(self, conf, mode, daemon, accept_unknown=False, log_external_commands=False): ''' The command manager is initialized with a `mode` parameter specifying what is to be done with the managed commands. If mode is: - applyer, the user daemon is a scheduler that will execute the command - dispatcher, the user daemon only dispatches the command to an applyer - receiver, the user daemon only receives commands, analyses and then dispatches them to the schedulers Note that the daemon parameter is really a Daemon object except for the scheduler where it is a Scheduler object! If `accept_passive_unknown_check_results` is True, then a Brok will be created even if passive checks are received for unknown host/service else a Warning log will be emitted.. Note: the receiver mode has no configuration :param conf: current configuration :type conf: alignak.objects.Config :param mode: command manager mode :type mode: str :param daemon: :type daemon: alignak.Daemon :param accept_unknown: accept or not unknown passive checks results :type accept_unknown: bool ''' pass def send_an_element(self, element): '''Send an element (Brok, Comment,...) to our daemon Use the daemon `add` function if it exists, else raise an error log :param element: elementto be sent :type: alignak.Brok, or Comment, or Downtime, ... :return: ''' pass def resolve_command(self, excmd): '''Parse command and dispatch it (to schedulers for example) if necessary If the command is not global it will be executed. :param excmd: external command to handle :type excmd: alignak.external_command.ExternalCommand :return: result of command parsing. None for an invalid command. 
''' pass def search_host_and_dispatch(self, host_name, command, extcmd): '''Try to dispatch a command for a specific host (so specific scheduler) because this command is related to a host (change notification interval for example) :param host_name: host name to search :type host_name: str :param command: command line :type command: str :param extcmd: external command object (the object will be added to sched commands list) :type extcmd: alignak.external_command.ExternalCommand :return: None ''' pass @staticmethod def get_unknown_check_result_brok(cmd_line): '''Create unknown check result brok and fill it with command data :param cmd_line: command line to extract data :type cmd_line: str :return: unknown check result brok :rtype: alignak.objects.brok.Brok ''' pass def get_command_and_args(self, command, extcmd=None): '''Parse command and get args :param command: command line to parse :type command: str :param extcmd: external command object (used to dispatch) :type extcmd: None | object :return: Dict containing command and arg :: {'global': False, 'c_name': c_name, 'args': args} :rtype: dict | None ''' pass @staticmethod def change_contact_modsattr(contact, value): '''Change contact modified service attribute value Format of the line that triggers function call:: CHANGE_CONTACT_MODSATTR;<contact_name>;<value> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param value: new value to set :type value: str :return: None ''' pass @staticmethod def change_contact_modhattr(contact, value): '''Change contact modified host attribute value Format of the line that triggers function call:: CHANGE_CONTACT_MODHATTR;<contact_name>;<value> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param value: new value to set :type value:str :return: None ''' pass @staticmethod def change_contact_modattr(contact, value): '''Change contact modified attribute value Format of the line that triggers function call:: 
CHANGE_CONTACT_MODATTR;<contact_name>;<value> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param value: new value to set :type value: str :return: None ''' pass def change_contact_host_notification_timeperiod(self, contact, notification_timeperiod): '''Change contact host notification timeperiod value Format of the line that triggers function call:: CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param notification_timeperiod: timeperiod to set :type notification_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None ''' pass def add_svc_comment(self, service, author, comment): '''Add a service comment Format of the line that triggers function call:: ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment> :param service: service to add the comment :type service: alignak.objects.service.Service :param author: author name :type author: str :param comment: text comment :type comment: str :return: None ''' pass def add_host_comment(self, host, author, comment): '''Add a host comment Format of the line that triggers function call:: ADD_HOST_COMMENT;<host_name>;<persistent:obsolete>;<author>;<comment> :param host: host to add the comment :type host: alignak.objects.host.Host :param author: author name :type author: str :param comment: text comment :type comment: str :return: None ''' pass def acknowledge_svc_problem(self, service, sticky, notify, author, comment): '''Acknowledge a service problem Format of the line that triggers function call:: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;<sticky>;<notify>; <persistent:obsolete>;<author>;<comment> :param service: service to acknowledge the problem :type service: alignak.objects.service.Service :param sticky: if sticky == 2, the acknowledge will remain until the service returns to an OK state else the acknowledge 
will be removed as soon as the service state changes :param notify: if to 1, send a notification :type notify: integer :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None ''' pass def acknowledge_host_problem(self, host, sticky, notify, author, comment): '''Acknowledge a host problem Format of the line that triggers function call:: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent:obsolete>;<author>; <comment> :param host: host to acknowledge the problem :type host: alignak.objects.host.Host :param sticky: if sticky == 2, the acknowledge will remain until the host returns to an UP state else the acknowledge will be removed as soon as the host state changes :type sticky: integer :param notify: if to 1, send a notification :type notify: integer :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None TODO: add a better ACK management ''' pass def acknowledge_svc_problem_expire(self, service, sticky, notify, end_time, author, comment): '''Acknowledge a service problem with expire time for this acknowledgement Format of the line that triggers function call:: ACKNOWLEDGE_SVC_PROBLEM_EXPIRE;<host_name>;<service_description>;<sticky>;<notify>; <persistent:obsolete>;<end_time>;<author>;<comment> :param service: service to acknowledge the problem :type service: alignak.objects.service.Service :param sticky: acknowledge will be always present is host return in UP state :type sticky: integer :param notify: if to 1, send a notification :type notify: integer :param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end) :type end_time: int :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None ''' pass def 
acknowledge_host_problem_expire(self, host, sticky, notify, end_time, author, comment): '''Acknowledge a host problem with expire time for this acknowledgement Format of the line that triggers function call:: ACKNOWLEDGE_HOST_PROBLEM_EXPIRE;<host_name>;<sticky>;<notify>;<persistent:obsolete>; <end_time>;<author>;<comment> :param host: host to acknowledge the problem :type host: alignak.objects.host.Host :param sticky: acknowledge will be always present is host return in UP state :type sticky: integer :param notify: if to 1, send a notification :type notify: integer :param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end) :type end_time: int :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :return: None TODO: add a better ACK management ''' pass def change_contact_svc_notification_timeperiod(self, contact, notification_timeperiod): '''Change contact service notification timeperiod value Format of the line that triggers function call:: CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param notification_timeperiod: timeperiod to set :type notification_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None ''' pass def change_custom_contact_var(self, contact, varname, varvalue): '''Change custom contact variable Format of the line that triggers function call:: CHANGE_CUSTOM_CONTACT_VAR;<contact_name>;<varname>;<varvalue> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :param varname: variable name to change :type varname: str :param varvalue: variable new value :type varvalue: str :return: None ''' pass def change_custom_host_var(self, host, varname, varvalue): '''Change custom host variable Format of the line that triggers function call:: 
CHANGE_CUSTOM_HOST_VAR;<host_name>;<varname>;<varvalue> :param host: host to edit :type host: alignak.objects.host.Host :param varname: variable name to change :type varname: str :param varvalue: variable new value :type varvalue: str :return: None ''' pass def change_custom_svc_var(self, service, varname, varvalue): '''Change custom service variable Format of the line that triggers function call:: CHANGE_CUSTOM_SVC_VAR;<host_name>;<service_description>;<varname>;<varvalue> :param service: service to edit :type service: alignak.objects.service.Service :param varname: variable name to change :type varvalue: str :param varvalue: variable new value :type varname: str :return: None ''' pass def change_global_host_event_handler(self, event_handler_command): '''DOES NOTHING (should change global host event handler) Format of the line that triggers function call:: CHANGE_GLOBAL_HOST_EVENT_HANDLER;<event_handler_command> :param event_handler_command: new event handler :type event_handler_command: :return: None TODO: DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value ''' pass def change_global_svc_event_handler(self, event_handler_command): '''DOES NOTHING (should change global service event handler) Format of the line that triggers function call:: CHANGE_GLOBAL_SVC_EVENT_HANDLER;<event_handler_command> :param event_handler_command: new event handler :type event_handler_command: :return: None TODO: DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value ''' pass def change_host_check_command(self, host, check_command): '''Modify host check command Format of the line that triggers function call:: CHANGE_HOST_CHECK_COMMAND;<host_name>;<check_command> :param host: host to modify check command :type host: alignak.objects.host.Host :param check_command: command line :type check_command: :return: None ''' pass def change_host_check_timeperiod(self, host, timeperiod): '''Modify host check timeperiod Format of the line that triggers function call:: 
CHANGE_HOST_CHECK_TIMEPERIOD;<host_name>;<timeperiod> :param host: host to modify check timeperiod :type host: alignak.objects.host.Host :param timeperiod: timeperiod object :type timeperiod: alignak.objects.timeperiod.Timeperiod :return: None ''' pass def change_host_event_handler(self, host, event_handler_command): '''Modify host event handler Format of the line that triggers function call:: CHANGE_HOST_EVENT_HANDLER;<host_name>;<event_handler_command> :param host: host to modify event handler :type host: alignak.objects.host.Host :param event_handler_command: event handler command line :type event_handler_command: :return: None ''' pass def change_host_snapshot_command(self, host, snapshot_command): '''Modify host snapshot command Format of the line that triggers function call:: CHANGE_HOST_SNAPSHOT_COMMAND;<host_name>;<event_handler_command> :param host: host to modify snapshot command :type host: alignak.objects.host.Host :param snapshot_command: snapshot command command line :type snapshot_command: :return: None ''' pass def change_host_modattr(self, host, value): '''Change host modified attributes Format of the line that triggers function call:: CHANGE_HOST_MODATTR;<host_name>;<value> For boolean attributes, toggles the service attribute state (enable/disable) For non boolean attribute, only indicates that the corresponding attribute is to be saved in the retention. 
Value can be: MODATTR_NONE 0 MODATTR_NOTIFICATIONS_ENABLED 1 MODATTR_ACTIVE_CHECKS_ENABLED 2 MODATTR_PASSIVE_CHECKS_ENABLED 4 MODATTR_EVENT_HANDLER_ENABLED 8 MODATTR_FLAP_DETECTION_ENABLED 16 MODATTR_PERFORMANCE_DATA_ENABLED 64 MODATTR_EVENT_HANDLER_COMMAND 256 MODATTR_CHECK_COMMAND 512 MODATTR_NORMAL_CHECK_INTERVAL 1024 MODATTR_RETRY_CHECK_INTERVAL 2048 MODATTR_MAX_CHECK_ATTEMPTS 4096 MODATTR_FRESHNESS_CHECKS_ENABLED 8192 MODATTR_CHECK_TIMEPERIOD 16384 MODATTR_CUSTOM_VARIABLE 32768 MODATTR_NOTIFICATION_TIMEPERIOD 65536 :param host: host to edit :type host: alignak.objects.host.Host :param value: new value to set :type value: str :return: None ''' pass def change_max_host_check_attempts(self, host, check_attempts): '''Modify max host check attempt Format of the line that triggers function call:: CHANGE_MAX_HOST_CHECK_ATTEMPTS;<host_name>;<check_attempts> :param host: host to edit :type host: alignak.objects.host.Host :param check_attempts: new value to set :type check_attempts: int :return: None ''' pass def change_max_svc_check_attempts(self, service, check_attempts): '''Modify max service check attempt Format of the line that triggers function call:: CHANGE_MAX_SVC_CHECK_ATTEMPTS;<host_name>;<service_description>;<check_attempts> :param service: service to edit :type service: alignak.objects.service.Service :param check_attempts: new value to set :type check_attempts: int :return: None ''' pass def change_normal_host_check_interval(self, host, check_interval): '''Modify host check interval Format of the line that triggers function call:: CHANGE_NORMAL_HOST_CHECK_INTERVAL;<host_name>;<check_interval> :param host: host to edit :type host: alignak.objects.host.Host :param check_interval: new value to set :type check_interval: :return: None ''' pass def change_normal_svc_check_interval(self, service, check_interval): '''Modify service check interval Format of the line that triggers function call:: 
CHANGE_NORMAL_SVC_CHECK_INTERVAL;<host_name>;<service_description>;<check_interval> :param service: service to edit :type service: alignak.objects.service.Service :param check_interval: new value to set :type check_interval: :return: None ''' pass def change_retry_host_check_interval(self, host, check_interval): '''Modify host retry interval Format of the line that triggers function call:: CHANGE_RETRY_HOST_CHECK_INTERVAL;<host_name>;<check_interval> :param host: host to edit :type host: alignak.objects.host.Host :param check_interval: new value to set :type check_interval: :return: None ''' pass def change_retry_svc_check_interval(self, service, check_interval): '''Modify service retry interval Format of the line that triggers function call:: CHANGE_RETRY_SVC_CHECK_INTERVAL;<host_name>;<service_description>;<check_interval> :param service: service to edit :type service: alignak.objects.service.Service :param check_interval: new value to set :type check_interval: :return: None ''' pass def change_svc_check_command(self, service, check_command): '''Modify service check command Format of the line that triggers function call:: CHANGE_SVC_CHECK_COMMAND;<host_name>;<service_description>;<check_command> :param service: service to modify check command :type service: alignak.objects.service.Service :param check_command: command line :type check_command: :return: None ''' pass def change_svc_check_timeperiod(self, service, check_timeperiod): '''Modify service check timeperiod Format of the line that triggers function call:: CHANGE_SVC_CHECK_TIMEPERIOD;<host_name>;<service_description>;<check_timeperiod> :param service: service to modify check timeperiod :type service: alignak.objects.service.Service :param check_timeperiod: timeperiod object :type check_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None ''' pass def change_svc_event_handler(self, service, event_handler_command): '''Modify service event handler Format of the line that triggers function call:: 
CHANGE_SVC_EVENT_HANDLER;<host_name>;<service_description>;<event_handler_command> :param service: service to modify event handler :type service: alignak.objects.service.Service :param event_handler_command: event handler command line :type event_handler_command: :return: None ''' pass def change_svc_snapshot_command(self, service, snapshot_command): '''Modify host snapshot command Format of the line that triggers function call:: CHANGE_SVC_SNAPSHOT_COMMAND;<host_name>;<event_handler_command> :param service: service to modify snapshot command :type service: alignak.objects.service.Service :param snapshot_command: snapshot command command line :type snapshot_command: :return: None ''' pass def change_svc_modattr(self, service, value): '''Change service modified attributes Format of the line that triggers function call:: CHANGE_SVC_MODATTR;<host_name>;<service_description>;<value> For boolean attributes, toggles the service attribute state (enable/disable) For non boolean attribute, only indicates that the corresponding attribute is to be saved in the retention. 
Value can be: MODATTR_NONE 0 MODATTR_NOTIFICATIONS_ENABLED 1 MODATTR_ACTIVE_CHECKS_ENABLED 2 MODATTR_PASSIVE_CHECKS_ENABLED 4 MODATTR_EVENT_HANDLER_ENABLED 8 MODATTR_FLAP_DETECTION_ENABLED 16 MODATTR_PERFORMANCE_DATA_ENABLED 64 MODATTR_EVENT_HANDLER_COMMAND 256 MODATTR_CHECK_COMMAND 512 MODATTR_NORMAL_CHECK_INTERVAL 1024 MODATTR_RETRY_CHECK_INTERVAL 2048 MODATTR_MAX_CHECK_ATTEMPTS 4096 MODATTR_FRESHNESS_CHECKS_ENABLED 8192 MODATTR_CHECK_TIMEPERIOD 16384 MODATTR_CUSTOM_VARIABLE 32768 MODATTR_NOTIFICATION_TIMEPERIOD 65536 :param service: service to edit :type service: alignak.objects.service.Service :param value: new value to set / unset :type value: str :return: None ''' pass def change_svc_notification_timeperiod(self, service, notification_timeperiod): '''Change service notification timeperiod Format of the line that triggers function call:: CHANGE_SVC_NOTIFICATION_TIMEPERIOD;<host_name>;<service_description>; <notification_timeperiod> :param service: service to edit :type service: alignak.objects.service.Service :param notification_timeperiod: timeperiod to set :type notification_timeperiod: alignak.objects.timeperiod.Timeperiod :return: None ''' pass def delay_host_notification(self, host, notification_time): '''Modify host first notification delay Format of the line that triggers function call:: DELAY_HOST_NOTIFICATION;<host_name>;<notification_time> :param host: host to edit :type host: alignak.objects.host.Host :param notification_time: new value to set :type notification_time: :return: None ''' pass def delay_svc_notification(self, service, notification_time): '''Modify service first notification delay Format of the line that triggers function call:: DELAY_SVC_NOTIFICATION;<host_name>;<service_description>;<notification_time> :param service: service to edit :type service: alignak.objects.service.Service :param notification_time: new value to set :type notification_time: :return: None ''' pass def del_all_contact_downtimes(self, contact): '''Delete all 
contact downtimes Format of the line that triggers function call:: DEL_ALL_CONTACT_DOWNTIMES;<contact_name> :param contact: contact to edit :type contact: alignak.objects.contact.Contact :return: None ''' pass def del_all_host_comments(self, host): '''Delete all host comments Format of the line that triggers function call:: DEL_ALL_HOST_COMMENTS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def del_all_host_downtimes(self, host): '''Delete all host downtimes Format of the line that triggers function call:: DEL_ALL_HOST_DOWNTIMES;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def del_all_svc_comments(self, service): '''Delete all service comments Format of the line that triggers function call:: DEL_ALL_SVC_COMMENTS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def del_all_svc_downtimes(self, service): '''Delete all service downtime Format of the line that triggers function call:: DEL_ALL_SVC_DOWNTIMES;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def del_contact_downtime(self, downtime_id): '''Delete a contact downtime Format of the line that triggers function call:: DEL_CONTACT_DOWNTIME;<downtime_id> :param downtime_id: downtime id to delete :type downtime_id: int :return: None ''' pass def del_host_comment(self, comment_id): '''Delete a host comment Format of the line that triggers function call:: DEL_HOST_COMMENT;<comment_id> :param comment_id: comment id to delete :type comment_id: int :return: None ''' pass def del_host_downtime(self, downtime_id): '''Delete a host downtime Format of the line that triggers function call:: DEL_HOST_DOWNTIME;<downtime_id> :param downtime_id: downtime id to delete :type downtime_id: int :return: None ''' pass def del_svc_comment(self, comment_id): 
'''Delete a service comment Format of the line that triggers function call:: DEL_SVC_COMMENT;<comment_id> :param comment_id: comment id to delete :type comment_id: int :return: None ''' pass def del_svc_downtime(self, downtime_id): '''Delete a service downtime Format of the line that triggers function call:: DEL_SVC_DOWNTIME;<downtime_id> :param downtime_id: downtime id to delete :type downtime_id: int :return: None ''' pass def disable_all_notifications_beyond_host(self, host): '''DOES NOTHING (should disable notification beyond a host) Format of the line that triggers function call:: DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None TODO: Implement it ''' pass def disable_contactgroup_host_notifications(self, contactgroup): '''Disable host notifications for a contactgroup Format of the line that triggers function call:: DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to disable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None ''' pass def disable_contactgroup_svc_notifications(self, contactgroup): '''Disable service notifications for a contactgroup Format of the line that triggers function call:: DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to disable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None ''' pass def disable_contact_host_notifications(self, contact): '''Disable host notifications for a contact Format of the line that triggers function call:: DISABLE_CONTACT_HOST_NOTIFICATIONS;<contact_name> :param contact: contact to disable :type contact: alignak.objects.contact.Contact :return: None ''' pass def disable_contact_svc_notifications(self, contact): '''Disable service notifications for a contact Format of the line that triggers function call:: DISABLE_CONTACT_SVC_NOTIFICATIONS;<contact_name> :param contact: contact to disable :type 
contact: alignak.objects.contact.Contact :return: None ''' pass def disable_event_handlers(self): '''Disable event handlers (globally) Format of the line that triggers function call:: DISABLE_EVENT_HANDLERS :return: None ''' pass def disable_flap_detection(self): '''Disable flap detection (globally) Format of the line that triggers function call:: DISABLE_FLAP_DETECTION :return: None ''' pass def disable_hostgroup_host_checks(self, hostgroup): '''Disable host checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_HOST_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def disable_hostgroup_host_notifications(self, hostgroup): '''Disable host notifications for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def disable_hostgroup_passive_host_checks(self, hostgroup): '''Disable host passive checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def disable_hostgroup_passive_svc_checks(self, hostgroup): '''Disable service passive checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def disable_hostgroup_svc_checks(self, hostgroup): '''Disable service checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def 
disable_hostgroup_svc_notifications(self, hostgroup): '''Disable service notifications for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def disable_host_and_child_notifications(self, host): '''DOES NOTHING (Should disable host notifications and its child) Format of the line that triggers function call:: DISABLE_HOST_AND_CHILD_NOTIFICATIONS;<host_name :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_host_check(self, host): '''Disable checks for a host Format of the line that triggers function call:: DISABLE_HOST_CHECK;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_host_event_handler(self, host): '''Disable event handlers for a host Format of the line that triggers function call:: DISABLE_HOST_EVENT_HANDLER;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_host_flap_detection(self, host): '''Disable flap detection for a host Format of the line that triggers function call:: DISABLE_HOST_FLAP_DETECTION;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_host_freshness_check(self, host): '''Disable freshness check for a host Format of the line that triggers function call:: DISABLE_HOST_FRESHNESS_CHECK;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_host_freshness_checks(self): '''Disable freshness checks (globally) Format of the line that triggers function call:: DISABLE_HOST_FRESHNESS_CHECKS :return: None ''' pass def disable_host_notifications(self, host): '''Disable notifications for a host Format of the line that triggers function call:: DISABLE_HOST_NOTIFICATIONS;<host_name> :param host: host 
to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_host_svc_checks(self, host): '''Disable service checks for a host Format of the line that triggers function call:: DISABLE_HOST_SVC_CHECKS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_host_svc_notifications(self, host): '''Disable services notifications for a host Format of the line that triggers function call:: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_notifications(self): '''Disable notifications (globally) Format of the line that triggers function call:: DISABLE_NOTIFICATIONS :return: None ''' pass def disable_passive_host_checks(self, host): '''Disable passive checks for a host Format of the line that triggers function call:: DISABLE_PASSIVE_HOST_CHECKS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def disable_passive_svc_checks(self, service): '''Disable passive checks for a service Format of the line that triggers function call:: DISABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def disable_performance_data(self): '''Disable performance data processing (globally) Format of the line that triggers function call:: DISABLE_PERFORMANCE_DATA :return: None ''' pass def disable_servicegroup_host_checks(self, servicegroup): '''Disable host checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def disable_servicegroup_host_notifications(self, servicegroup): '''Disable host notifications for a servicegroup Format of the line that triggers function call:: 
DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def disable_servicegroup_passive_host_checks(self, servicegroup): '''Disable passive host checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def disable_servicegroup_passive_svc_checks(self, servicegroup): '''Disable passive service checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def disable_servicegroup_svc_checks(self, servicegroup): '''Disable service checks for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def disable_servicegroup_svc_notifications(self, servicegroup): '''Disable service notifications for a servicegroup Format of the line that triggers function call:: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to disable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def disable_service_flap_detection(self, service): '''Disable flap detection for a service Format of the line that triggers function call:: DISABLE_SERVICE_FLAP_DETECTION;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def disable_svc_freshness_check(self, service): '''Disable freshness check for a service Format of the 
line that triggers function call:: DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def disable_service_freshness_checks(self): '''Disable service freshness checks (globally) Format of the line that triggers function call:: DISABLE_SERVICE_FRESHNESS_CHECKS :return: None ''' pass def disable_svc_check(self, service): '''Disable checks for a service Format of the line that triggers function call:: DISABLE_SVC_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def disable_svc_event_handler(self, service): '''Disable event handlers for a service Format of the line that triggers function call:: DISABLE_SVC_EVENT_HANDLER;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def disable_svc_flap_detection(self, service): '''Disable flap detection for a service Format of the line that triggers function call:: DISABLE_SVC_FLAP_DETECTION;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def disable_svc_notifications(self, service): '''Disable notifications for a service Format of the line that triggers function call:: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def enable_all_notifications_beyond_host(self, host): '''DOES NOTHING (should enable notification beyond a host) Format of the line that triggers function call:: ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None TODO: Implement it ''' pass def enable_contactgroup_host_notifications(self, contactgroup): '''Enable host notifications for a contactgroup Format 
of the line that triggers function call:: ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to enable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None ''' pass def enable_contactgroup_svc_notifications(self, contactgroup): '''Enable service notifications for a contactgroup Format of the line that triggers function call:: ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to enable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None ''' pass def enable_contact_host_notifications(self, contact): '''Enable host notifications for a contact Format of the line that triggers function call:: ENABLE_CONTACT_HOST_NOTIFICATIONS;<contact_name> :param contact: contact to enable :type contact: alignak.objects.contact.Contact :return: None ''' pass def enable_contact_svc_notifications(self, contact): '''Enable service notifications for a contact Format of the line that triggers function call:: ENABLE_CONTACT_SVC_NOTIFICATIONS;<contact_name> :param contact: contact to enable :type contact: alignak.objects.contact.Contact :return: None ''' pass def enable_event_handlers(self): '''Enable event handlers (globally) Format of the line that triggers function call:: ENABLE_EVENT_HANDLERS :return: None ''' pass def enable_flap_detection(self): '''Enable flap detection (globally) Format of the line that triggers function call:: ENABLE_FLAP_DETECTION :return: None ''' pass def enable_hostgroup_host_checks(self, hostgroup): '''Enable host checks for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_HOST_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def enable_hostgroup_host_notifications(self, hostgroup): '''Enable host notifications for a hostgroup Format of the line that triggers function call:: 
ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def enable_hostgroup_passive_host_checks(self, hostgroup): '''Enable host passive checks for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def enable_hostgroup_passive_svc_checks(self, hostgroup): '''Enable service passive checks for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def enable_hostgroup_svc_checks(self, hostgroup): '''Enable service checks for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def enable_hostgroup_svc_notifications(self, hostgroup): '''Enable service notifications for a hostgroup Format of the line that triggers function call:: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name> :param hostgroup: hostgroup to enable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ''' pass def enable_host_and_child_notifications(self, host): '''DOES NOTHING (Should enable host notifications and its child) Format of the line that triggers function call:: ENABLE_HOST_AND_CHILD_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_host_check(self, host): '''Enable checks for a host Format of the line that triggers function call:: ENABLE_HOST_CHECK;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_host_event_handler(self, 
host): '''Enable event handlers for a host Format of the line that triggers function call:: ENABLE_HOST_EVENT_HANDLER;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_host_flap_detection(self, host): '''Enable flap detection for a host Format of the line that triggers function call:: ENABLE_HOST_FLAP_DETECTION;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_host_freshness_check(self, host): '''Enable freshness check for a host Format of the line that triggers function call:: ENABLE_HOST_FRESHNESS_CHECK;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_host_freshness_checks(self): '''Enable freshness checks (globally) Format of the line that triggers function call:: ENABLE_HOST_FRESHNESS_CHECKS :return: None ''' pass def enable_host_notifications(self, host): '''Enable notifications for a host Format of the line that triggers function call:: ENABLE_HOST_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_host_svc_checks(self, host): '''Enable service checks for a host Format of the line that triggers function call:: ENABLE_HOST_SVC_CHECKS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_host_svc_notifications(self, host): '''Enable services notifications for a host Format of the line that triggers function call:: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_notifications(self): '''Enable notifications (globally) Format of the line that triggers function call:: ENABLE_NOTIFICATIONS :return: None ''' pass def enable_passive_host_checks(self, host): '''Enable passive checks for a host Format of the line that triggers function call:: 
ENABLE_PASSIVE_HOST_CHECKS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass def enable_passive_svc_checks(self, service): '''Enable passive checks for a service Format of the line that triggers function call:: ENABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def enable_performance_data(self): '''Enable performance data processing (globally) Format of the line that triggers function call:: ENABLE_PERFORMANCE_DATA :return: None ''' pass def enable_servicegroup_host_checks(self, servicegroup): '''Enable host checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def enable_servicegroup_host_notifications(self, servicegroup): '''Enable host notifications for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def enable_servicegroup_passive_host_checks(self, servicegroup): '''Enable passive host checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def enable_servicegroup_passive_svc_checks(self, servicegroup): '''Enable passive service checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def 
enable_servicegroup_svc_checks(self, servicegroup): '''Enable service checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def enable_servicegroup_svc_notifications(self, servicegroup): '''Enable service notifications for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None ''' pass def enable_service_freshness_checks(self): '''Enable service freshness checks (globally) Format of the line that triggers function call:: ENABLE_SERVICE_FRESHNESS_CHECKS :return: None ''' pass def enable_svc_check(self, service): '''Enable checks for a service Format of the line that triggers function call:: ENABLE_SVC_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def enable_svc_event_handler(self, service): '''Enable event handlers for a service Format of the line that triggers function call:: ENABLE_SVC_EVENT_HANDLER;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def enable_svc_freshness_check(self, service): '''Enable freshness check for a service Format of the line that triggers function call:: ENABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def enable_svc_flap_detection(self, service): '''Enable flap detection for a service Format of the line that triggers function call:: ENABLE_SVC_FLAP_DETECTION;<host_name>;<service_description> :param service: service to edit :type service: 
alignak.objects.service.Service :return: None ''' pass def enable_svc_notifications(self, service): '''Enable notifications for a service Format of the line that triggers function call:: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def process_file(self, file_name, delete): '''DOES NOTHING (should process a file) Format of the line that triggers function call:: PROCESS_FILE;<file_name>;<delete> :param file_name: file to process :type file_name: str :param delete: delete after processing :type delete: :return: None ''' pass def process_host_check_result(self, host, status_code, plugin_output): '''Process host check result Format of the line that triggers function call:: PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output> :param host: host to process check to :type host: alignak.objects.host.Host :param status_code: exit code of plugin :type status_code: int :param plugin_output: plugin output :type plugin_output: str :return: None TODO: say that check is PASSIVE ''' pass def process_host_output(self, host, plugin_output): '''Process host output Format of the line that triggers function call:: PROCESS_HOST_OUTPUT;<host_name>;<plugin_output> :param host: host to process check to :type host: alignak.objects.host.Host :param plugin_output: plugin output :type plugin_output: str :return: None ''' pass def process_service_check_result(self, service, return_code, plugin_output): '''Process service check result Format of the line that triggers function call:: PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output> :param service: service to process check to :type service: alignak.objects.service.Service :param return_code: exit code of plugin :type return_code: int :param plugin_output: plugin output :type plugin_output: str :return: None ''' pass def process_service_output(self, service, plugin_output): 
'''Process service output Format of the line that triggers function call:: PROCESS_SERVICE_OUTPUT;<host_name>;<service_description>;<plugin_output> :param service: service to process check to :type service: alignak.objects.service.Service :param plugin_output: plugin output :type plugin_output: str :return: None ''' pass def read_state_information(self): '''Request to load the live state from the retention storage Format of the line that triggers function call:: READ_STATE_INFORMATION :return: None ''' pass @staticmethod def remove_host_acknowledgement(host): '''Remove an acknowledgment on a host Format of the line that triggers function call:: REMOVE_HOST_ACKNOWLEDGEMENT;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None ''' pass @staticmethod def remove_svc_acknowledgement(service): '''Remove an acknowledgment on a service Format of the line that triggers function call:: REMOVE_SVC_ACKNOWLEDGEMENT;<host_name>;<service_description> :param service: service to edit :type service: alignak.objects.service.Service :return: None ''' pass def restart_program(self): '''Restart Alignak Format of the line that triggers function call:: RESTART_PROGRAM :return: None ''' pass def reload_config(self): '''Reload Alignak configuration Format of the line that triggers function call:: RELOAD_CONFIG :return: None ''' pass def save_state_information(self): '''Request to save the live state to the retention Format of the line that triggers function call:: SAVE_STATE_INFORMATION :return: None ''' pass def schedule_and_propagate_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): '''DOES NOTHING (Should create host downtime and start it?) 
Format of the line that triggers function call:: SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :return: None ''' pass def schedule_and_propagate_triggered_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): '''DOES NOTHING (Should create triggered host downtime and start it?) Format of the line that triggers function call:: SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :return: None ''' pass def schedule_contact_downtime(self, contact, start_time, end_time, author, comment): '''Schedule contact downtime Format of the line that triggers function call:: SCHEDULE_CONTACT_DOWNTIME;<contact_name>;<start_time>;<end_time>;<author>;<comment> :param contact: contact to put in downtime :type contact: alignak.objects.contact.Contact :param start_time: downtime start time :type start_time: int :param end_time: downtime end time :type end_time: int :param author: downtime author :type author: str :param comment: text comment :type comment: str :return: None ''' pass def schedule_forced_host_check(self, host, check_time): '''Schedule a forced check on a host Format of the line that triggers function call:: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: int :return: None ''' pass def schedule_forced_host_svc_checks(self, host, check_time): '''Schedule a forced check on all services of a host Format of the line that triggers function call:: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: int :return: None ''' pass def schedule_forced_svc_check(self, service, check_time): '''Schedule a forced check on a service Format of the line that 
triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None ''' pass def schedule_hostgroup_host_downtime(self, hostgroup, start_time, end_time, fixed, trigger_id, duration, author, comment): '''Schedule a downtime for each host of a hostgroup Format of the line that triggers function call:: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param hostgroup: hostgroup to schedule :type hostgroup: alignak.objects.hostgroup.Hostgroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None ''' pass def schedule_hostgroup_svc_downtime(self, hostgroup, start_time, end_time, fixed, trigger_id, duration, author, comment): '''Schedule a downtime for each service of each host of a hostgroup Format of the line that triggers function call:: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;;<hostgroup_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :param hostgroup: hostgroup to schedule :type hostgroup: alignak.objects.hostgroup.Hostgroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None ''' pass def 
schedule_host_check(self, host, check_time): '''Schedule a check on a host Format of the line that triggers function call:: SCHEDULE_HOST_CHECK;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None ''' pass def schedule_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): '''Schedule a host downtime Format of the line that triggers function call:: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :param host: host to schedule downtime :type host: alignak.object.host.Host :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None ''' pass def schedule_host_svc_checks(self, host, check_time): '''Schedule a check on all services of a host Format of the line that triggers function call:: SCHEDULE_HOST_SVC_CHECKS;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None ''' pass def schedule_host_svc_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment): '''Schedule a service downtime for each service of an host Format of the line that triggers function call:: SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param host: host to schedule downtime :type host: alignak.object.host.Host :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool 
:param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None ''' pass def schedule_servicegroup_host_downtime(self, servicegroup, start_time, end_time, fixed, trigger_id, duration, author, comment): '''Schedule a host downtime for each host of services in a servicegroup Format of the line that triggers function call:: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;<fixed>; <trigger_id>;<duration>;<author>;<comment> :param servicegroup: servicegroup to schedule downtime :type servicegroup: alignak.object.servicegroup.Servicegroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None ''' pass def schedule_servicegroup_svc_downtime(self, servicegroup, start_time, end_time, fixed, trigger_id, duration, author, comment): '''Schedule a service downtime for each service of a servicegroup Format of the line that triggers function call:: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param servicegroup: servicegroup to schedule downtime :type servicegroup: alignak.object.servicegroup.Servicegroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str 
:param comment: downtime comment :type comment: str :return: None ''' pass def schedule_svc_check(self, service, check_time): '''Schedule a check on a service Format of the line that triggers function call:: SCHEDULE_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: :return: None ''' pass def schedule_svc_downtime(self, service, start_time, end_time, fixed, trigger_id, duration, author, comment): '''Schedule a service downtime Format of the line that triggers function call:: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description><start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param service: service to check :type service: alignak.object.service.Service :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: bool :param trigger_id: downtime id that triggered this one :type trigger_id: int :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None ''' pass def send_custom_host_notification(self, host, options, author, comment): '''DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: SEND_CUSTOM_HOST_NOTIFICATION;<host_name>;<options>;<author>;<comment> :param host: host to send notif for :type host: alignak.object.host.Host :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None ''' pass def send_custom_svc_notification(self, service, options, author, comment): '''DOES NOTHING (Should send a custom notification) Format of the line that triggers function call:: 
SEND_CUSTOM_SVC_NOTIFICATION;<host_name>;<service_description>;<options>;<author>;<comment>> :param service: service to send notif for :type service: alignak.object.service.Service :param options: notification options :type options: :param author: notification author :type author: str :param comment: notification text :type comment: str :return: None ''' pass def set_host_notification_number(self, host, notification_number): '''DOES NOTHING (Should set host notification number) Format of the line that triggers function call:: SET_HOST_NOTIFICATION_NUMBER;<host_name>;<notification_number> :param host: host to edit :type host: alignak.object.host.Host :param notification_number: new value to set :type notification_number: :return: None ''' pass def set_svc_notification_number(self, service, notification_number): '''DOES NOTHING (Should set host notification number) Format of the line that triggers function call:: SET_SVC_NOTIFICATION_NUMBER;<host_name>;<service_description>;<notification_number> :param service: service to edit :type service: alignak.object.service.Service :param notification_number: new value to set :type notification_number: :return: None ''' pass def shutdown_program(self): '''DOES NOTHING (Should shutdown Alignak) Format of the line that triggers function call:: SHUTDOWN_PROGRAM :return: None ''' pass def start_accepting_passive_host_checks(self): '''Enable passive host check submission (globally) Format of the line that triggers function call:: START_ACCEPTING_PASSIVE_HOST_CHECKS :return: None ''' pass def start_accepting_passive_svc_checks(self): '''Enable passive service check submission (globally) Format of the line that triggers function call:: START_ACCEPTING_PASSIVE_SVC_CHECKS :return: None ''' pass def start_executing_host_checks(self): '''Enable host check execution (globally) Format of the line that triggers function call:: START_EXECUTING_HOST_CHECKS :return: None ''' pass def start_executing_svc_checks(self): '''Enable service check 
execution (globally) Format of the line that triggers function call:: START_EXECUTING_SVC_CHECKS :return: None ''' pass def stop_accepting_passive_host_checks(self): '''Disable passive host check submission (globally) Format of the line that triggers function call:: STOP_ACCEPTING_PASSIVE_HOST_CHECKS :return: None ''' pass def stop_accepting_passive_svc_checks(self): '''Disable passive service check submission (globally) Format of the line that triggers function call:: STOP_ACCEPTING_PASSIVE_SVC_CHECKS :return: None ''' pass def stop_executing_host_checks(self): '''Disable host check execution (globally) Format of the line that triggers function call:: STOP_EXECUTING_HOST_CHECKS :return: None ''' pass def stop_executing_svc_checks(self): '''Disable service check execution (globally) Format of the line that triggers function call:: STOP_EXECUTING_SVC_CHECKS :return: None ''' pass def launch_svc_event_handler(self, service): '''Launch event handler for a service Format of the line that triggers function call:: LAUNCH_SVC_EVENT_HANDLER;<host_name>;<service_description> :param service: service to execute the event handler :type service: alignak.objects.service.Service :return: None ''' pass def launch_host_event_handler(self, host): '''Launch event handler for a service Format of the line that triggers function call:: LAUNCH_HOST_EVENT_HANDLER;<host_name> :param host: host to execute the event handler :type host: alignak.objects.host.Host :return: None ''' pass
177
171
20
3
8
9
3
0.91
1
15
5
0
164
14
170
170
3,906
598
1,736
361
1,550
1,580
1,119
343
948
50
1
7
436
3,901
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/comment.py
alignak.comment.Comment
class Comment(AlignakObject):
    """Comment class implements comments for monitoring purpose.

    It contains data like author, type etc..
    """

    my_type = 'comment'

    properties = {
        'entry_time': IntegerProp(default=0),
        'entry_type': IntegerProp(),
        'author': StringProp(default=u'Alignak'),
        'comment': StringProp(default=u''),
        'comment_type': IntegerProp(),
        'source': IntegerProp(default=0),
        'expires': BoolProp(default=False),
        'ref': StringProp(default=u'unset'),
        'ref_type': StringProp(default=u'unset'),
    }

    def __init__(self, params, parsing=False):
        """Create a comment from a dict of parameters.

        Expected keys match the class ``properties``:

        * ``comment_type``: comment type ::

          * 1 <=> HOST_COMMENT
          * 2 <=> SERVICE_COMMENT

        * ``entry_type``: type of entry linked to this comment ::

          * 1 <=> USER_COMMENT
          * 2 <=> DOWNTIME_COMMENT
          * 3 <=> FLAPPING_COMMENT
          * 4 <=> ACKNOWLEDGEMENT_COMMENT

        * ``source``: source of this comment ::

          * 0 <=> COMMENTSOURCE_INTERNAL
          * 1 <=> COMMENTSOURCE_EXTERNAL

        * ``expires``: whether the comment expires or not (bool)
        * ``author`` / ``comment``: author name and comment text
        * ``ref`` / ``ref_type``: reference object (host / service)

        :param params: comment attributes
        :type params: dict
        :param parsing: parsing mode flag, forwarded to AlignakObject
        :type parsing: bool
        :return: None
        """
        super(Comment, self).__init__(params, parsing)

        # Stamp the comment with the current time when no (or a falsy)
        # entry time was provided in the parameters.
        if not getattr(self, 'entry_time', None):
            self.entry_time = int(time.time())

        self.fill_default()

    def __str__(self):  # pragma: no cover
        """Short human-readable representation: id and comment text."""
        return "Comment id=%s %s" % (self.uuid, self.comment)

    def get_comment_brok(self, host_name, service_name=''):
        """Build a 'comment' brok carrying this comment's serialized data.

        The serialized comment is completed with the concerned host name
        and, when provided, the service name.

        :param host_name: name of the host the comment refers to
        :type host_name: str
        :param service_name: name of the service the comment refers to, if any
        :type service_name: str
        :return: brok with wanted data
        :rtype: alignak.brok.Brok
        """
        payload = self.serialize()
        payload['host'] = host_name
        if service_name:
            payload['service'] = service_name
        return Brok({'type': 'comment', 'data': payload})
class Comment(AlignakObject): '''Comment class implements comments for monitoring purpose. It contains data like author, type etc.. ''' def __init__(self, params, parsing=False): '''Adds a comment to a particular service. :param ref: reference object (host / service) :type ref: alignak.object.schedulingitem.SchedulingItem :param author: Author of this comment :type author: str :param comment: text comment itself :type comment: str :param comment_type: comment type :: * 1 <=> HOST_COMMENT * 2 <=> SERVICE_COMMENT :type comment_type: int :param entry_type: type of entry linked to this comment :: * 1 <=> USER_COMMENT * 2 <=>DOWNTIME_COMMENT * 3 <=>FLAPPING_COMMENT * 4 <=>ACKNOWLEDGEMENT_COMMENT :type entry_type: int :param source: source of this comment :: * 0 <=> COMMENTSOURCE_INTERNAL * 1 <=> COMMENTSOURCE_EXTERNAL :type source: int :param expires: comment expires or not :type expires: bool :return: None ''' pass def __str__(self): pass def get_comment_brok(self, host_name, service_name=''): '''Get a comment brok :param host_name: :param service_name: :return: brok with wanted data :rtype: alignak.brok.Brok ''' pass
4
3
18
4
4
11
2
1
1
3
1
0
3
1
3
6
84
15
35
8
31
35
16
8
12
2
2
1
5
3,902
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/graph.py
alignak.graph.Graph
class Graph(object): """Graph is a class to make graph things like DFS checks or accessibility Why use an atomic bomb when a little hammer is enough? Graph are oriented. """ def __init__(self): self.nodes = {} def add_node(self, node): """Create the node key into the mode dict with [] value :param node: node to add :type node: object :return: None """ self.nodes[node] = {"dfs_loop_status": "", "sons": []} def add_nodes(self, nodes): """Add several nodes into the nodes dict :param nodes: nodes to add :type nodes: object :return: None """ for node in nodes: self.add_node(node) def add_edge(self, from_node, to_node): """Add edge between two node The edge is oriented :param from_node: node where edge starts :type from_node: object :param to_node: node where edge ends :type to_node: object :return: None """ # Maybe to_node is unknown if to_node not in self.nodes: self.add_node(to_node) try: self.nodes[from_node]["sons"].append(to_node) # If from_node does not exist, add it with its son except KeyError: self.nodes[from_node] = {"dfs_loop_status": "", "sons": [to_node]} def loop_check(self): """Check if we have a loop in the graph :return: Nodes in loop :rtype: list """ in_loop = [] # Add the tag for dfs check for node in list(self.nodes.values()): node['dfs_loop_status'] = 'DFS_UNCHECKED' # Now do the job for node_id, node in self.nodes.items(): # Run the dfs only if the node has not been already done */ if node['dfs_loop_status'] == 'DFS_UNCHECKED': self.dfs_loop_search(node_id) # If LOOP_INSIDE, must be returned if node['dfs_loop_status'] == 'DFS_LOOP_INSIDE': in_loop.append(node_id) # Remove the tag for node in list(self.nodes.values()): del node['dfs_loop_status'] return in_loop def dfs_loop_search(self, root): """Main algorithm to look for loop. It tags nodes and find ones stuck in loop. 
* Init all nodes with DFS_UNCHECKED value * DFS_TEMPORARY_CHECKED means we found it once * DFS_OK : this node (and all sons) are fine * DFS_NEAR_LOOP : One problem was found in of of the son * DFS_LOOP_INSIDE : This node is part of a loop :param root: Root of the dependency tree :type root: :return: None """ # Make the root temporary checked self.nodes[root]['dfs_loop_status'] = 'DFS_TEMPORARY_CHECKED' # We are scanning the sons for child in self.nodes[root]["sons"]: child_status = self.nodes[child]['dfs_loop_status'] # If a child is not checked, check it if child_status == 'DFS_UNCHECKED': self.dfs_loop_search(child) child_status = self.nodes[child]['dfs_loop_status'] # If a child has already been temporary checked, it's a problem, # loop inside, and its a checked status if child_status == 'DFS_TEMPORARY_CHECKED': self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE' self.nodes[root]['dfs_loop_status'] = 'DFS_LOOP_INSIDE' # If a child has already been temporary checked, it's a problem, loop inside if child_status in ('DFS_NEAR_LOOP', 'DFS_LOOP_INSIDE'): # if a node is known to be part of a loop, do not let it be less if self.nodes[root]['dfs_loop_status'] != 'DFS_LOOP_INSIDE': self.nodes[root]['dfs_loop_status'] = 'DFS_NEAR_LOOP' # We've already seen this child, it's a problem self.nodes[child]['dfs_loop_status'] = 'DFS_LOOP_INSIDE' # If root have been modified, do not set it OK # A node is OK if and only if all of its children are OK # if it does not have a child, goes ok if self.nodes[root]['dfs_loop_status'] == 'DFS_TEMPORARY_CHECKED': self.nodes[root]['dfs_loop_status'] = 'DFS_OK' def get_accessibility_packs(self): """Get accessibility packs of the graph: in one pack element are related in a way. Between packs, there is no relation at all. 
TODO: Make it work for directional graph too Because for now, edge must be father->son AND son->father :return: packs of nodes :rtype: list """ packs = [] # Add the tag for dfs check for node in list(self.nodes.values()): node['dfs_loop_status'] = 'DFS_UNCHECKED' for node_id, node in self.nodes.items(): # Run the dfs only if the node is not already done */ if node['dfs_loop_status'] == 'DFS_UNCHECKED': packs.append(self.dfs_get_all_childs(node_id)) # Remove the tag for node in list(self.nodes.values()): del node['dfs_loop_status'] return packs def dfs_get_all_childs(self, root): """Recursively get all sons of this node :param root: node to get sons :type root: :return: sons :rtype: list """ self.nodes[root]['dfs_loop_status'] = 'DFS_CHECKED' ret = set() # Me ret.add(root) # And my sons ret.update(self.nodes[root]['sons']) for child in self.nodes[root]['sons']: # I just don't care about already checked children if self.nodes[child]['dfs_loop_status'] == 'DFS_UNCHECKED': ret.update(self.dfs_get_all_childs(child)) return list(ret)
class Graph(object): '''Graph is a class to make graph things like DFS checks or accessibility Why use an atomic bomb when a little hammer is enough? Graph are oriented. ''' def __init__(self): pass def add_node(self, node): '''Create the node key into the mode dict with [] value :param node: node to add :type node: object :return: None ''' pass def add_nodes(self, nodes): '''Add several nodes into the nodes dict :param nodes: nodes to add :type nodes: object :return: None ''' pass def add_edge(self, from_node, to_node): '''Add edge between two node The edge is oriented :param from_node: node where edge starts :type from_node: object :param to_node: node where edge ends :type to_node: object :return: None ''' pass def loop_check(self): '''Check if we have a loop in the graph :return: Nodes in loop :rtype: list ''' pass def dfs_loop_search(self, root): '''Main algorithm to look for loop. It tags nodes and find ones stuck in loop. * Init all nodes with DFS_UNCHECKED value * DFS_TEMPORARY_CHECKED means we found it once * DFS_OK : this node (and all sons) are fine * DFS_NEAR_LOOP : One problem was found in of of the son * DFS_LOOP_INSIDE : This node is part of a loop :param root: Root of the dependency tree :type root: :return: None ''' pass def get_accessibility_packs(self): '''Get accessibility packs of the graph: in one pack element are related in a way. Between packs, there is no relation at all. TODO: Make it work for directional graph too Because for now, edge must be father->son AND son->father :return: packs of nodes :rtype: list ''' pass def dfs_get_all_childs(self, root): '''Recursively get all sons of this node :param root: node to get sons :type root: :return: sons :rtype: list ''' pass
9
8
19
3
8
9
4
1.19
1
3
0
0
8
1
8
8
168
32
62
21
53
74
62
21
53
7
1
3
28
3,903
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/http/client.py
alignak.http.client.HTTPClient
class HTTPClient(object): """HTTPClient class use python request to communicate over HTTP Basically used to get / post to other daemons """ def __init__(self, address='', port=0, use_ssl=False, short_timeout=3, long_timeout=120, uri='', strong_ssl=False, proxy=''): # pylint: disable=too-many-arguments self.address = address self.port = port self.short_timeout = short_timeout self.long_timeout = long_timeout self.use_ssl = use_ssl self.strong_ssl = strong_ssl if not uri: protocol = "https" if use_ssl else "http" uri = "%s://%s:%s/" % (protocol, self.address, self.port) self.uri = uri self._requests_con = requests.Session() # self.session = requests.Session() self._requests_con.header = {'Content-Type': 'application/json'} # Requests HTTP adapters http_adapter = HTTPAdapter(max_retries=3) https_adapter = HTTPAdapter(max_retries=3) self._requests_con.mount('http://', http_adapter) self._requests_con.mount('https://', https_adapter) self.set_proxy(proxy) def make_uri(self, path): """Create uri from path :param path: path to make uri :type path: str :return: self.uri + path :rtype: str """ return '%s%s' % (self.uri, path) def make_timeout(self, wait): """Get short_timeout depending on wait time :param wait: wait for a long timeout :type wait: bool :return: self.short_timeout if wait is short, self.long_timeout otherwise :rtype: int """ return self.short_timeout if not wait else self.long_timeout def set_proxy(self, proxy): # pragma: no cover, not with unit tests """Set HTTP proxy :param proxy: proxy url :type proxy: str :return: None """ if proxy: logger.debug('PROXY SETTING PROXY %s', proxy) self._requests_con.proxies = { 'http': proxy, 'https': proxy, } def get(self, path, args=None, wait=False): """GET an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: None """ if args is None: args = {} uri = self.make_uri(path) timeout 
= self.make_timeout(wait) try: logger.debug("get: %s, timeout: %s, params: %s", uri, timeout, args) rsp = self._requests_con.get(uri, params=args, timeout=timeout, verify=self.strong_ssl) logger.debug("got: %d - %s", rsp.status_code, rsp.text) if rsp.status_code != 200: raise HTTPClientDataException(rsp.status_code, rsp.text, uri) return rsp.json() except (requests.Timeout, requests.ConnectTimeout): # pragma: no cover raise HTTPClientTimeoutException(timeout, uri) except requests.ConnectionError as exp: # pragma: no cover raise HTTPClientConnectionException(uri, exp.args[0]) except Exception as exp: # pragma: no cover raise HTTPClientException('Request error to %s: %s' % (uri, exp)) def post(self, path, args, wait=False): """POST an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: Content of the HTTP response if server returned 200 :rtype: str """ uri = self.make_uri(path) timeout = self.make_timeout(wait) for (key, value) in list(args.items()): args[key] = serialize(value, True) try: logger.debug("post: %s, timeout: %s, params: %s", uri, timeout, args) rsp = self._requests_con.post(uri, json=args, timeout=timeout, verify=self.strong_ssl) logger.debug("got: %d - %s", rsp.status_code, rsp.text) if rsp.status_code != 200: raise HTTPClientDataException(rsp.status_code, rsp.text, uri) return rsp.content except (requests.Timeout, requests.ConnectTimeout): # pragma: no cover raise HTTPClientTimeoutException(timeout, uri) except requests.ConnectionError as exp: # pragma: no cover raise HTTPClientConnectionException(uri, exp.args[0]) except Exception as exp: # pragma: no cover raise HTTPClientException('Request error to %s: %s' % (uri, exp)) def put(self, path, args, wait=False): # pragma: no cover, looks never used! # todo: remove this because it looks never used anywhere... 
"""PUT and HTTP request to a daemon :param path: path to do the request :type path: str :param args: data to send in the request :type args: :return: Content of the HTTP response if server returned 200 :rtype: str """ uri = self.make_uri(path) timeout = self.make_timeout(wait) try: logger.debug("put: %s, timeout: %s, params: %s", uri, timeout, args) rsp = self._requests_con.put(uri, args, timeout=timeout, verify=self.strong_ssl) logger.debug("got: %d - %s", rsp.status_code, rsp.text) if rsp.status_code != 200: raise HTTPClientDataException(rsp.status_code, rsp.text, uri) return rsp.content except (requests.Timeout, requests.ConnectTimeout): # pragma: no cover raise HTTPClientTimeoutException(timeout, uri) except requests.ConnectionError as exp: # pragma: no cover raise HTTPClientConnectionException(uri, exp.args[0]) except Exception as exp: # pragma: no cover raise HTTPClientException('Request error to %s: %s' % (uri, exp))
class HTTPClient(object): '''HTTPClient class use python request to communicate over HTTP Basically used to get / post to other daemons ''' def __init__(self, address='', port=0, use_ssl=False, short_timeout=3, long_timeout=120, uri='', strong_ssl=False, proxy=''): pass def make_uri(self, path): '''Create uri from path :param path: path to make uri :type path: str :return: self.uri + path :rtype: str ''' pass def make_timeout(self, wait): '''Get short_timeout depending on wait time :param wait: wait for a long timeout :type wait: bool :return: self.short_timeout if wait is short, self.long_timeout otherwise :rtype: int ''' pass def set_proxy(self, proxy): '''Set HTTP proxy :param proxy: proxy url :type proxy: str :return: None ''' pass def get(self, path, args=None, wait=False): '''GET an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: None ''' pass def post(self, path, args, wait=False): '''POST an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: Content of the HTTP response if server returned 200 :rtype: str ''' pass def put(self, path, args, wait=False): '''PUT and HTTP request to a daemon :param path: path to do the request :type path: str :param args: data to send in the request :type args: :return: Content of the HTTP response if server returned 200 :rtype: str ''' pass
8
7
20
1
12
8
4
0.75
1
11
4
0
7
8
7
7
150
16
83
33
74
62
79
29
71
6
1
2
25
3,904
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/http/client.py
alignak.http.client.HTTPClientConnectionException
class HTTPClientConnectionException(Exception): """HTTP Connection Exception - raised when connection fails with the server. This specific exception is raised when a connection exception is catched. Its attribute are: - uri: the requested URI, - msg: the exception message """ def __init__(self, uri, msg): # Call the base class constructor with the parameters it needs super(HTTPClientConnectionException, self).__init__() self.uri = uri self.msg = msg def __str__(self): # pragma: no cover """Exception to String""" return "Server not available: %s - %s" % (self.uri, self.msg)
class HTTPClientConnectionException(Exception): '''HTTP Connection Exception - raised when connection fails with the server. This specific exception is raised when a connection exception is catched. Its attribute are: - uri: the requested URI, - msg: the exception message ''' def __init__(self, uri, msg): pass def __str__(self): '''Exception to String''' pass
3
2
5
1
3
2
1
1.29
1
1
0
0
2
2
2
12
18
3
7
5
4
9
7
5
4
1
3
0
2
3,905
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/daemons/pollerdaemon.py
alignak.daemons.pollerdaemon.Poller
class Poller(Satellite): """Poller class. Referenced as "app" in most Interface """ do_checks = True # I do checks do_actions = False # but no actions my_type = 'poller' properties = Satellite.properties.copy() properties.update({ 'type': StringProp(default='poller'), 'port': IntegerProp(default=7771) }) def __init__(self, **kwargs): """Poller daemon initialisation :param kwargs: command line arguments """ super(Poller, self).__init__(kwargs.get('daemon_name', 'Default-poller'), **kwargs)
class Poller(Satellite): '''Poller class. Referenced as "app" in most Interface ''' def __init__(self, **kwargs): '''Poller daemon initialisation :param kwargs: command line arguments ''' pass
2
2
6
1
2
3
1
0.54
1
1
0
0
1
0
1
83
22
4
13
6
11
7
8
6
6
1
4
0
1
3,906
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/commandcall.py
alignak.commandcall.CommandCall
class CommandCall(AlignakObject): # pylint: disable=too-many-instance-attributes """This class is use when a service, contact or host define a command with args. """ # AutoSlots create the __slots__ with properties and # running_properties names __metaclass__ = AutoSlots my_type = 'CommandCall' properties = { # Initial command line in the configuration 'command_line': StringProp(), # Command line split: name and arguments 'command_name': StringProp(), 'args': ListProp(default=[]), # alignak.objects.Command 'command': StringProp(default=u''), 'poller_tag': StringProp(default=u''), 'reactionner_tag': StringProp(default=u''), 'module_type': StringProp(default=u'fork'), 'valid': BoolProp(default=True), 'timeout': IntegerProp(default=-1), 'enable_environment_macros': BoolProp(default=False), } # pylint: disable=too-many-branches def __init__(self, params, parsing=False): """ Note: A CommandCall may receive 'commands' in its parameters; it is the whole known commands list in which the command must be found. If no commands list is provided, the CommandCall is initialized with the provided parameters that must contain one Command. :param params: """ if params is None: params = {} if not parsing: # Deserialize an existing object # todo: Why not initializing the running properties in this case? super(CommandCall, self).__init__(params, parsing=parsing) return # List of known commands commands = None if 'commands' in params: commands = params.pop('commands') # Create a base command call super(CommandCall, self).__init__(params, parsing=parsing) if parsing: # Fill default object values from the properties self.fill_default() # fixme: why not inheriting from Item? # This is a minimum copy of the Item class initialization! 
for key in params: try: if key in self.__class__.properties: val = self.__class__.properties[key].pythonize(params[key]) else: val = ToGuessProp().pythonize(params[key]) except (PythonizeError, AttributeError, ValueError, TypeError) as exp: raise PythonizeError("Error while pythonizing parameter '%s': %s" % (key, exp)) setattr(self, key, val) # Get command and arguments self.command_name, self.args = self.get_command_and_args() # todo: remove this... 1/ unserialize should have handled and 2/ we should not even be here! if not parsing: # Link the provided Alignak command with myself self.command = unserialize(params['command']) # We received a commands list to search into... if commands: self.valid = False self.command = commands.find_by_name(self.command_name) if self.command is not None: # Found a declared command self.valid = True # Get the host/service poller/reactionner tag, # else the ones defined in the command if self.poller_tag in [None, 'None', '']: self.poller_tag = self.command.poller_tag # Same for reactionner tag if self.reactionner_tag in [None, 'None', '']: self.reactionner_tag = self.command.reactionner_tag self.module_type = self.command.module_type self.enable_environment_macros = self.command.enable_environment_macros self.timeout = int(self.command.timeout) def __str__(self): # pragma: no cover return "<CommandCall %s, uuid=%s, command line: %s />" \ % (self.get_name(), self.uuid, getattr(self, 'command_line', None)) __repr__ = __str__ def serialize(self, no_json=True, printing=False): # uuid is not in *_properties res = {'uuid': self.uuid} for prop in self.__class__.properties: try: res[prop] = serialize(getattr(self, prop), no_json=no_json, printing=printing) except AttributeError: pass # for prop in self.__class__.properties: # if prop in ['command']: # # Specific for the command (alignak.objects.command.Command object) # res[prop] = serialize(getattr(self, prop), # no_json=no_json, printing=printing) # elif hasattr(self, prop): # res[prop] = 
getattr(self, prop) return res def get_command_and_args(self): """We want to get the command and the args with ! splitting. but don't forget to protect against the ! to avoid splitting on them Remember: A Nagios-like command is command_name!arg1!arg2!... :return: None """ # First protect tab = self.command_line.replace(r'\!', '___PROTECT_EXCLAMATION___').split('!') return tab[0].strip(), [s.replace('___PROTECT_EXCLAMATION___', '!') for s in tab[1:]] def is_valid(self): """Getter for valid attribute :return: True if object is valid, False otherwise :rtype: bool """ return self.valid def get_name(self): """Getter for command name attribute :return: command name :rtype: str """ return getattr(self, 'command_name', 'Unset')
class CommandCall(AlignakObject): '''This class is use when a service, contact or host define a command with args. ''' def __init__(self, params, parsing=False): ''' Note: A CommandCall may receive 'commands' in its parameters; it is the whole known commands list in which the command must be found. If no commands list is provided, the CommandCall is initialized with the provided parameters that must contain one Command. :param params: ''' pass def __str__(self): pass def serialize(self, no_json=True, printing=False): pass def get_command_and_args(self): '''We want to get the command and the args with ! splitting. but don't forget to protect against the ! to avoid splitting on them Remember: A Nagios-like command is command_name!arg1!arg2!... :return: None ''' pass def is_valid(self): '''Getter for valid attribute :return: True if object is valid, False otherwise :rtype: bool ''' pass def get_name(self): '''Getter for command name attribute :return: command name :rtype: str ''' pass
7
5
19
3
9
8
3
0.68
1
7
2
0
6
9
6
9
161
26
81
26
74
55
57
23
50
13
2
4
20
3,907
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/alignak/http/generic_interface.py
alignak.http.generic_interface.GenericInterface
class GenericInterface(object): """Interface for inter satellites communications""" def __init__(self, app): self.app = app self.start_time = int(time.time()) # Set a running identifier that will change if the attached daemon is restarted self.running_id = "%d.%d" % ( self.start_time, random.randint(0, 100000000) ) ##### # _____ _ # | ____| __ __ _ __ ___ ___ ___ __| | # | _| \ \/ / | '_ \ / _ \ / __| / _ \ / _` | # | |___ > < | |_) | | (_) | \__ \ | __/ | (_| | # |_____| /_/\_\ | .__/ \___/ |___/ \___| \__,_| # |_| ##### @cherrypy.expose @cherrypy.tools.json_out() def index(self): """Wrapper to call api from / This will return the daemon identity and main information :return: function list """ return self.identity() @cherrypy.expose @cherrypy.tools.json_out() def identity(self): """Get the daemon identity This will return an object containing some properties: - alignak: the Alignak instance name - version: the Alignak version - type: the daemon type - name: the daemon name :return: daemon identity :rtype: dict """ res = self.app.get_id() res.update({"start_time": self.start_time}) res.update({"running_id": self.running_id}) return res @cherrypy.expose @cherrypy.tools.json_out() def api(self): """List the methods available on the daemon Web service interface :return: a list of methods and parameters :rtype: dict """ functions = [x[0]for x in inspect.getmembers(self, predicate=inspect.ismethod) if not x[0].startswith('_')] full_api = { 'doc': u"When posting data you have to use the JSON format.", 'api': [] } my_daemon_type = "%s" % getattr(self.app, 'type', 'unknown') my_address = getattr(self.app, 'host_name', getattr(self.app, 'name', 'unknown')) if getattr(self.app, 'address', '127.0.0.1') not in ['127.0.0.1']: # If an address is explicitely specified, I must use it! 
my_address = self.app.address for fun in functions: endpoint = { 'daemon': my_daemon_type, 'name': fun, 'doc': getattr(self, fun).__doc__, 'uri': '%s://%s:%s/%s' % (getattr(self.app, 'scheme', 'http'), my_address, self.app.port, fun), 'args': {} } try: spec = inspect.getfullargspec(getattr(self, fun)) except Exception: # pylint: disable=broad-except # pylint: disable=deprecated-method spec = inspect.getargspec(getattr(self, fun)) args = [a for a in spec.args if a not in ('self', 'cls')] if spec.defaults: a_dict = dict(list(zip(args, spec.defaults))) else: a_dict = dict( list(zip(args, ("No default value",) * len(args)))) endpoint["args"] = a_dict full_api['api'].append(endpoint) return full_api @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def stop_request(self, stop_now='0'): """Request the daemon to stop If `stop_now` is set to '1' the daemon will stop now. Else, the daemon will enter the stop wait mode. In this mode the daemon stops its activity and waits until it receives a new `stop_now` request to stop really. :param stop_now: stop now or go to stop wait mode :type stop_now: bool :return: None """ self.app.interrupted = (stop_now == '1') self.app.will_stop = True return True @cherrypy.expose @cherrypy.tools.json_out() def get_log_level(self): """Get the current daemon log level Returns an object with the daemon identity and a `log_level` property. 
running_id :return: current log level :rtype: str """ level_names = { logging.DEBUG: 'DEBUG', logging.INFO: 'INFO', logging.WARNING: 'WARNING', logging.ERROR: 'ERROR', logging.CRITICAL: 'CRITICAL' } alignak_logger = logging.getLogger(ALIGNAK_LOGGER_NAME) res = self.identity() res.update({"log_level": alignak_logger.getEffectiveLevel(), "log_level_name": level_names[alignak_logger.getEffectiveLevel()]}) return res @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def set_log_level(self, log_level=None): """Set the current log level for the daemon The `log_level` parameter must be in [DEBUG, INFO, WARNING, ERROR, CRITICAL] In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error Else, this function returns True :param log_level: a value in one of the above :type log_level: str :return: see above :rtype: dict """ if log_level is None: log_level = cherrypy.request.json['log_level'] if log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']: return {'_status': u'ERR', '_message': u"Required log level is not allowed: %s" % log_level} alignak_logger = logging.getLogger(ALIGNAK_LOGGER_NAME) alignak_logger.setLevel(log_level) return self.get_log_level() set_log_level.method = 'post' @cherrypy.expose @cherrypy.tools.json_out() def managed_configurations(self): """Get the arbiter configuration managed by the daemon For an arbiter daemon, it returns an empty object For all other daemons it returns a dictionary formated list of the scheduler links managed by the daemon: { 'instance_id': { 'hash': , 'push_flavor': , 'managed_conf_id': } } If a daemon returns an empty list, it means that it has not yet received its configuration from the arbiter. 
:return: managed configuration :rtype: list """ return self.app.get_managed_configurations() @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def stats(self, details=False): """Get statistics and information from the daemon Returns an object with the daemon identity, the daemon start_time and some extra properties depending upon the daemon type. All daemons provide these ones: - program_start: the Alignak start timestamp - spare: to indicate if the daemon is a spare one - load: the daemon load - modules: the daemon modules information - counters: the specific daemon counters :param details: Details are required (different from 0) :type details str :return: daemon stats :rtype: dict """ if details is not False: details = bool(details) res = self.identity() res.update(self.app.get_daemon_stats(details=details)) return res ##### # ___ _ _ _ # |_ _| _ __ | |_ ___ _ __ _ __ __ _ | | ___ _ __ | | _ _ # | | | '_ \ | __| / _ \ | '__| | '_ \ / _` | | | / _ \ | '_ \ | | | | | | # | | | | | | | |_ | __/ | | | | | | | (_| | | | | (_) | | | | | | | | |_| | # |___| |_| |_| \__| \___| |_| |_| |_| \__,_| |_| \___/ |_| |_| |_| \__, | # |___/ ##### @cherrypy.expose @cherrypy.tools.json_out() def _wait_new_conf(self): """Ask the daemon to drop its configuration and wait for a new one :return: None """ with self.app.conf_lock: logger.debug( "My Arbiter wants me to wait for a new configuration.") # Clear can occur while setting up a new conf and lead to error. self.app.schedulers.clear() self.app.cur_conf = {} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_configuration(self, pushed_configuration=None): """Send a new configuration to the daemon This function is not intended for external use. 
It is quite complex to build a configuration for a daemon and it is the arbiter dispatcher job ;) :param pushed_configuration: new conf to send :return: None """ if pushed_configuration is None: confs = cherrypy.request.json pushed_configuration = confs['conf'] # It is safer to lock this part with self.app.conf_lock: self.app.new_conf = pushed_configuration return True _push_configuration.method = 'post' @cherrypy.expose @cherrypy.tools.json_out() def _have_conf(self, magic_hash=None): """Get the daemon current configuration state If the daemon has received a configuration from its arbiter, this will return True If a `magic_hash` is provided it is compared with the one included in the daemon configuration and this function returns True only if they match! :return: boolean indicating if the daemon has a configuration :rtype: bool """ self.app.have_conf = getattr( self.app, 'cur_conf', None) not in [None, {}] if magic_hash is not None: # Beware, we got an str in entry, not an int magic_hash = int(magic_hash) # I've got a conf and a good one return self.app.have_conf and self.app.cur_conf.magic_hash == magic_hash return self.app.have_conf @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_actions(self): """Push actions to the poller/reactionner This function is used by the scheduler to send the actions to get executed to the poller/reactionner {'actions': actions, 'instance_id': scheduler_instance_id} :return:None """ data = cherrypy.request.json with self.app.lock: self.app.add_actions( data['actions'], data['scheduler_instance_id']) _push_actions.method = 'post' @cherrypy.expose @cherrypy.tools.json_out() def _external_commands(self): """Get the external commands from the daemon Use a lock for this function to protect :return: serialized external command list :rtype: str """ res = [] with self.app.external_commands_lock: for cmd in self.app.get_external_commands(): res.append(cmd.serialize()) return res @cherrypy.expose 
@cherrypy.tools.json_out() def _results(self, scheduler_instance_id): """Get the results of the executed actions for the scheduler which instance id is provided Calling this method for daemons that are not configured as passive do not make sense. Indeed, this service should only be exposed on poller and reactionner daemons. :param scheduler_instance_id: instance id of the scheduler :type scheduler_instance_id: string :return: serialized list :rtype: str """ logger.debug("Get results for the scheduler: %s", scheduler_instance_id) try: with self.app.lock: res = self.app.get_results_from_passive(scheduler_instance_id) logger.debug("Got results: %s", res) # logger.warning("Got: %s", res) # for a in res: # logger.warning("-: %s", a) # logger.warning("-: %s", a.__dict__) # Serialize but do not make it json encoded res = serialize(res, no_json=True) except Exception as exp: logger.warning("_results, exception: %s", exp) res = [] return res @cherrypy.expose @cherrypy.tools.json_out() def _broks(self, broker_name): # pylint: disable=unused-argument """Get the broks from the daemon This is used by the brokers to get the broks list of a daemon :return: Brok list serialized :rtype: dict """ try: with self.app.broks_lock: res = self.app.give_broks() # Serialize but do not make it json encoded res = serialize(res, no_json=True) except Exception as exp: logger.warning("_broks, exception: %s", exp) res = [] return res @cherrypy.expose @cherrypy.tools.json_out() def _events(self): """Get the monitoring events from the daemon This is used by the arbiter to get the monitoring events from all its satellites :return: Events list serialized :rtype: list """ try: with self.app.events_lock: res = self.app.get_events() # Serialize but do not make it json encoded res = serialize(res, no_json=True) except Exception as exp: logger.warning("_events, exception: %s", exp) res = [] return res
class GenericInterface(object): '''Interface for inter satellites communications''' def __init__(self, app): pass @cherrypy.expose @cherrypy.tools.json_out() def index(self): '''Wrapper to call api from / This will return the daemon identity and main information :return: function list ''' pass @cherrypy.expose @cherrypy.tools.json_out() def identity(self): '''Get the daemon identity This will return an object containing some properties: - alignak: the Alignak instance name - version: the Alignak version - type: the daemon type - name: the daemon name :return: daemon identity :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def api(self): '''List the methods available on the daemon Web service interface :return: a list of methods and parameters :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def stop_request(self, stop_now='0'): '''Request the daemon to stop If `stop_now` is set to '1' the daemon will stop now. Else, the daemon will enter the stop wait mode. In this mode the daemon stops its activity and waits until it receives a new `stop_now` request to stop really. :param stop_now: stop now or go to stop wait mode :type stop_now: bool :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_out() def get_log_level(self): '''Get the current daemon log level Returns an object with the daemon identity and a `log_level` property. 
running_id :return: current log level :rtype: str ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def set_log_level(self, log_level=None): '''Set the current log level for the daemon The `log_level` parameter must be in [DEBUG, INFO, WARNING, ERROR, CRITICAL] In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error Else, this function returns True :param log_level: a value in one of the above :type log_level: str :return: see above :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def managed_configurations(self): '''Get the arbiter configuration managed by the daemon For an arbiter daemon, it returns an empty object For all other daemons it returns a dictionary formated list of the scheduler links managed by the daemon: { 'instance_id': { 'hash': , 'push_flavor': , 'managed_conf_id': } } If a daemon returns an empty list, it means that it has not yet received its configuration from the arbiter. :return: managed configuration :rtype: list ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def stats(self, details=False): '''Get statistics and information from the daemon Returns an object with the daemon identity, the daemon start_time and some extra properties depending upon the daemon type. 
All daemons provide these ones: - program_start: the Alignak start timestamp - spare: to indicate if the daemon is a spare one - load: the daemon load - modules: the daemon modules information - counters: the specific daemon counters :param details: Details are required (different from 0) :type details str :return: daemon stats :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _wait_new_conf(self): '''Ask the daemon to drop its configuration and wait for a new one :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_configuration(self, pushed_configuration=None): '''Send a new configuration to the daemon This function is not intended for external use. It is quite complex to build a configuration for a daemon and it is the arbiter dispatcher job ;) :param pushed_configuration: new conf to send :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _have_conf(self, magic_hash=None): '''Get the daemon current configuration state If the daemon has received a configuration from its arbiter, this will return True If a `magic_hash` is provided it is compared with the one included in the daemon configuration and this function returns True only if they match! 
:return: boolean indicating if the daemon has a configuration :rtype: bool ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_actions(self): '''Push actions to the poller/reactionner This function is used by the scheduler to send the actions to get executed to the poller/reactionner {'actions': actions, 'instance_id': scheduler_instance_id} :return:None ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _external_commands(self): '''Get the external commands from the daemon Use a lock for this function to protect :return: serialized external command list :rtype: str ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _results(self, scheduler_instance_id): '''Get the results of the executed actions for the scheduler which instance id is provided Calling this method for daemons that are not configured as passive do not make sense. Indeed, this service should only be exposed on poller and reactionner daemons. :param scheduler_instance_id: instance id of the scheduler :type scheduler_instance_id: string :return: serialized list :rtype: str ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _broks(self, broker_name): '''Get the broks from the daemon This is used by the brokers to get the broks list of a daemon :return: Brok list serialized :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _events(self): '''Get the monitoring events from the daemon This is used by the arbiter to get the monitoring events from all its satellites :return: Events list serialized :rtype: list ''' pass
55
17
19
3
8
8
2
0.86
1
6
0
3
17
3
17
17
399
75
175
62
120
151
118
43
100
5
1
2
30
3,908
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/brok.py
alignak.brok.Brok
class Brok(object): """A Brok is a piece of information exported by Alignak to the Broker. Broker can do whatever he wants with it. A specific type of Brok exists when the type is monitoring_log. This Brok contains a monitoring event (alert, notification, ...) information Broks types: - log - monitoring_log - notification_raise - acknowledge_raise - downtime_raise - acknowledge_expire - downtime_expire - initial_host_status, initial_service_status, initial_contact_status - initial_broks_done - update_host_status, update_service_status, initial_contact_status - host_check_result, service_check_result - host_next_schedule, service_next_scheduler - host_snapshot, service_snapshot - unknown_host_check_result, unknown_service_check_result - program_status, initial program status - update_program_status, program status updated (raised on each scheduler loop) - clean_all_my_instance_id - new_conf """ my_type = 'brok' def __init__(self, params, parsing=True): # pylint: disable=unused-argument """ Note that the data attribute of a Brok is a serialized storage! 
:param params: initialization parameters :type params: dict :param parsing: not used but necessary for serialization/unserialization :type parsing: bool """ self.uuid = params.get('uuid', get_a_new_object_id()) self.prepared = params.get('prepared', False) self.creation_time = params.get('creation_time', time.time()) self.instance_id = params.get('instance_id', None) self.type = params.get('type', u'unknown') # Need to behave differently when un-serializing if 'uuid' in params: self.data = params['data'] return # serialize data as json self.data = params['data'] # self.data = serialize(params['data'], no_json=True, printing=False) def __repr__(self): ct = datetime.fromtimestamp(self.creation_time).strftime("%Y-%m-%d %H:%M:%S.%f") return "Brok %s (%s) '%s': %s" % (self.uuid, ct, self.type, self.data) __str__ = __repr__ def get_event(self): """This function returns an Event from a Brok If the type is monitoring_log then the Brok contains a monitoring event (alert, notification, ...) information. This function will return a tuple with the creation time, the level and the message information :return: tuple with date, level and message :rtype: tuple """ self.prepare() return self.creation_time, self.data['level'], self.data['message'] # pylint: disable=unused-argument def serialize(self, no_json=True, printing=False): """This function serialize into a simple dict object. 
It is used when transferring data to other daemons over the network (http) Here we directly return all attributes :return: serialized Brok :rtype: dict """ return { "uuid": self.uuid, "prepared": self.prepared, "creation_time": self.creation_time, "instance_id": self.instance_id, "type": self.type, "data": serialize(self.data, no_json=False, printing=False) } def prepare(self): """Un-serialize data from data attribute and add instance_id key if necessary :return: the brok data part :rtype: dict """ # Maybe the Brok is an old daemon one or was already prepared # if so, the data is already ok if not getattr(self, 'prepared', False): if not isinstance(self.data, dict): # unserialize json data to prepare the brok self.data = unserialize(self.data, no_json=False, printing=False) # todo: check what for? and remove this... if self.instance_id: self.data['instance_id'] = self.instance_id self.prepared = True return self.data
class Brok(object): '''A Brok is a piece of information exported by Alignak to the Broker. Broker can do whatever he wants with it. A specific type of Brok exists when the type is monitoring_log. This Brok contains a monitoring event (alert, notification, ...) information Broks types: - log - monitoring_log - notification_raise - acknowledge_raise - downtime_raise - acknowledge_expire - downtime_expire - initial_host_status, initial_service_status, initial_contact_status - initial_broks_done - update_host_status, update_service_status, initial_contact_status - host_check_result, service_check_result - host_next_schedule, service_next_scheduler - host_snapshot, service_snapshot - unknown_host_check_result, unknown_service_check_result - program_status, initial program status - update_program_status, program status updated (raised on each scheduler loop) - clean_all_my_instance_id - new_conf ''' def __init__(self, params, parsing=True): ''' Note that the data attribute of a Brok is a serialized storage! :param params: initialization parameters :type params: dict :param parsing: not used but necessary for serialization/unserialization :type parsing: bool ''' pass def __repr__(self): pass def get_event(self): '''This function returns an Event from a Brok If the type is monitoring_log then the Brok contains a monitoring event (alert, notification, ...) information. This function will return a tuple with the creation time, the level and the message information :return: tuple with date, level and message :rtype: tuple ''' pass def serialize(self, no_json=True, printing=False): '''This function serialize into a simple dict object. It is used when transferring data to other daemons over the network (http) Here we directly return all attributes :return: serialized Brok :rtype: dict ''' pass def prepare(self): '''Un-serialize data from data attribute and add instance_id key if necessary :return: the brok data part :rtype: dict ''' pass
6
5
15
2
7
6
2
1.58
1
2
0
0
5
6
5
5
113
20
36
15
30
57
29
15
23
4
1
2
9
3,909
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/alignak/http/scheduler_interface.py
alignak.http.scheduler_interface.SchedulerInterface
class SchedulerInterface(GenericInterface): """This module provide a specific HTTP interface for a Scheduler daemon.""" ##### # _____ _ # | ____| __ __ _ __ ___ ___ ___ __| | # | _| \ \/ / | '_ \ / _ \ / __| / _ \ / _` | # | |___ > < | |_) | | (_) | \__ \ | __/ | (_| | # |_____| /_/\_\ | .__/ \___/ |___/ \___| \__,_| # |_| ##### @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def object(self, o_type, o_name=None): """Get an object from the scheduler. The result is a serialized object which is a Json structure containing: - content: the serialized object content - __sys_python_module__: the python class of the returned object The Alignak unserialize function of the alignak.misc.serialization package allows to restore the initial object. .. code-block:: python from alignak.misc.serialization import unserialize from alignak.objects.hostgroup import Hostgroup raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts") print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() group = unserialize(object, True) assert group.__class__ == Hostgroup assert group.get_name() == 'allhosts' As an example: { "__sys_python_module__": "alignak.objects.hostgroup.Hostgroup", "content": { "uuid": "32248642-97dd-4f39-aaa2-5120112a765d", "name": "", "hostgroup_name": "allhosts", "use": [], "tags": [], "alias": "All Hosts", "notes": "", "definition_order": 100, "register": true, "unknown_members": [], "notes_url": "", "action_url": "", "imported_from": "unknown", "conf_is_correct": true, "configuration_errors": [], "configuration_warnings": [], "realm": "", "downtimes": {}, "hostgroup_members": [], "members": [ "553d47bc-27aa-426c-a664-49c4c0c4a249", "f88093ca-e61b-43ff-a41e-613f7ad2cea2", "df1e2e13-552d-43de-ad2a-fe80ad4ba979", "d3d667dd-f583-4668-9f44-22ef3dcb53ad" ] } } :param o_type: searched object type :type o_type: str :param o_name: searched object name (or uuid) :type o_name: str 
:return: serialized object information :rtype: str """ o_found = self._get_object(o_type=o_type, o_name=o_name) if not o_found: return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type} return o_found @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def dump(self, o_name=None, details=False, raw=False): # pylint: disable=too-many-locals, too-many-branches """Dump an host (all hosts) from the scheduler. This gets the main host information from the scheduler. If details is set, then some more information are provided. This will not get all the host known attributes but only a reduced set that will inform about the host and its services status If raw is set the information are provided in two string lists formated as CSV strings. The first list element contains the hosts information and the second one contains the services information. If an host name is provided, this function will get only this host information, else all the scheduler hosts are returned. 
As an example (raw format): [ [ # Host information "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output", "BR_host;host;BR_host;1532451511;0;UP;HARD;False;False;Host assumed to be UP" ], [ # Services information "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output", "BR_host;service;dummy_critical;1532451490;2;CRITICAL;SOFT;False;False; BR_host-dummy_critical-2", "BR_host;service;BR_Simple_And;0;0;OK;HARD;False;False;", "BR_host;service;dummy_unreachable;1532451501;4;UNREACHABLE;SOFT;False;False; BR_host-dummy_unreachable-4", "BR_host;service;dummy_no_output;1532451495;0;OK;HARD;False;False; Service internal check result: 0", "BR_host;service;dummy_unknown;1532451475;3;UNKNOWN;SOFT;False;False; BR_host-dummy_unknown-3", "BR_host;service;dummy_echo;1532451501;0;OK;HARD;False;False;", "BR_host;service;dummy_warning;1532451492;1;WARNING;SOFT;False;False; BR_host-dummy_warning-1", "BR_host;service;dummy_random;1532451496;2;CRITICAL;SOFT;False;False; Service internal check result: 2", "BR_host;service;dummy_ok;1532451492;0;OK;HARD;False;False;BR_host" ] ] As an example (json format): { is_impact: false, name: "BR_host", state: "UP", last_check: 1532451811, state_type: "HARD", host: "BR_host", output: "Host assumed to be UP", services: [ { is_impact: false, name: "dummy_critical", state: "CRITICAL", last_check: 1532451790, state_type: "HARD", host: "BR_host", output: "BR_host-dummy_critical-2", state_id: 2, type: "service", is_problem: true }, { is_impact: true, name: "BR_Simple_And", state: "WARNING", last_check: 1532451775, state_type: "SOFT", host: "BR_host", output: "", state_id: 1, type: "service", is_problem: false }, .... .... 
}, state_id: 0, type: "host", is_problem: false } :param o_name: searched host name (or uuid) :type o_name: str :param details: less or more details :type details: bool :param raw: json or raw text format :type raw: bool :return: list of host and services information :rtype: list """ def get_host_info(host, services, details=False, raw=False): # pylint: disable=too-many-branches """Get the host information :return: None """ __props__ = [ 'last_check', 'state_id', 'state', 'state_type', 'is_problem', 'is_impact', 'output' ] if details: __props__ = __props__ + [ 'uuid', 'address', 'alias', 'business_impact', 'tags', 'customs', 'parents', 'long_output', 'perf_data', 'check_period', 'active_checks_enabled', 'passive_checks_enabled', 'check_freshness', 'freshness_threshold', 'freshness_state', 'get_overall_state', 'overall_state_id', 'state_id', 'state', 'state_type', 'passive_check', 'acknowledged', 'downtimed', 'next_check', 'last_time_up', 'last_time_down', 'last_time_ok', 'last_time_warning', 'last_time_critical', 'last_time_unknown', 'last_time_unreachable' ] host_data = OrderedDict({'type': 'host', 'host': host.get_name(), 'name': host.get_name()}) __header__ = ['type', 'host', 'name'] for key in __props__: if hasattr(host, key): __header__.append(key) if isinstance(getattr(host, key), collections.Callable): host_data[key] = getattr(host, key)(services) elif isinstance(getattr(host, key), set): host_data[key] = list(getattr(host, key)) else: host_data[key] = getattr(host, key) if raw: host_data['_header_host'] = __header__ host_data['services'] = [] __header__ = ['type', 'host', 'name'] for service in host.services: service = services[service] service_data = OrderedDict({'type': 'service', 'host': host.get_name(), 'name': service.get_name()}) for key in __props__: if hasattr(service, key): if key not in __header__: __header__.append(key) if isinstance(getattr(service, key), collections.Callable): service_data[key] = getattr(services, key)() elif 
isinstance(getattr(service, key), set): service_data[key] = list(getattr(service, key)) else: service_data[key] = getattr(service, key) host_data['services'].append(service_data) if raw: host_data['_header_service'] = __header__ return host_data if details is not False: details = bool(details) if raw is not False: raw = bool(raw) ls = [] try: hosts = self._get_objects('host') services = self._get_objects('service') if o_name is None: for host in hosts: ls.append(get_host_info( host, services, details=details, raw=raw)) else: # Perhaps we got an host uuid... host = hosts.find_by_name(o_name) if o_name in hosts: host = hosts[o_name] if host: ls.append(get_host_info( host, services, details=False, raw=raw)) except Exception as exp: # pylint: disable=broad-except return str(exp) + " / " + traceback.print_exc() if o_name and not host: return {'_status': u'ERR', '_message': u'Required host (%s) not found.' % o_name} if raw and ls: raw_ls_hosts = [] _header_host = ['type', 'host', 'name'] raw_ls_services = [] _header_service = ['type', 'host', 'name'] for item in ls: if len(item['_header_host']) > len(_header_host): _header_host = item['_header_host'] if len(item['_header_service']) > len(_header_service): _header_service = item['_header_service'] item.pop('_header_host') item.pop('_header_service') services = [] if 'services' in item: services = item.pop('services') # Write host line raw_ls_hosts.append( ';'.join("%s" % val for val in list(item.values()))) for service in services: raw_ls_services.append( ';'.join("%s" % val for val in list(service.values()))) raw_ls_hosts.insert(0, ';'.join(_header_host)) raw_ls_services.insert(0, ';'.join(_header_service)) return [raw_ls_hosts, raw_ls_services] return ls @cherrypy.expose @cherrypy.tools.json_out() def monitoring_problems(self): """Get Alignak scheduler monitoring status Returns an object with the scheduler livesynthesis and the known problems :return: scheduler live synthesis :rtype: dict """ if self.app.type != 
'scheduler': return {'_status': u'ERR', '_message': u"This service is only available for a scheduler daemon"} res = self.identity() res.update(self.app.get_monitoring_problems()) return res ##### # ___ _ _ _ # |_ _| _ __ | |_ ___ _ __ _ __ __ _ | | ___ _ __ | | _ _ # | | | '_ \ | __| / _ \ | '__| | '_ \ / _` | | | / _ \ | '_ \ | | | | | | # | | | | | | | |_ | __/ | | | | | | | (_| | | | | (_) | | | | | | | | |_| | # |___| |_| |_| \__| \___| |_| |_| |_| \__,_| |_| \___/ |_| |_| |_| \__, | # |___/ ##### @cherrypy.expose @cherrypy.tools.json_out() def _wait_new_conf(self): """Ask the scheduler to drop its configuration and wait for a new one. This overrides the default method from GenericInterface :return: None """ # Stop the scheduling loop self.app.sched.stop_scheduling() super(SchedulerInterface, self)._wait_new_conf() @cherrypy.expose @cherrypy.tools.json_out() def _initial_broks(self, broker_name): """Get initial_broks from the scheduler This is used by the brokers to prepare the initial status broks This do not send broks, it only makes scheduler internal processing. 
Then the broker must use the *_broks* API to get all the stuff :param broker_name: broker name, used to filter broks :type broker_name: str :return: None """ with self.app.conf_lock: logger.info("A new broker just connected : %s", broker_name) return self.app.sched.fill_initial_broks(broker_name) @cherrypy.expose @cherrypy.tools.json_out() def _broks(self, broker_name): """Get the broks from a scheduler, used by brokers This is used by the brokers to get the broks list of a scheduler :param broker_name: broker name, used to filter broks :type broker_name: str :return: serialized brok list :rtype: dict """ logger.debug("Getting broks for %s from the scheduler", broker_name) for broker_link in list(self.app.brokers.values()): if broker_name == broker_link.name: break else: logger.warning( "Requesting broks for an unknown broker: %s", broker_name) return [] try: # Now get the broks for this specific broker with self.app.broks_lock: res = self.app.give_broks(broker_name) # Serialize but do not make it json encoded res = serialize(res, no_json=True) except Exception as exp: logger.warning( "Getting broks for %s from the scheduler", broker_name) logger.warning("Serializing: %s", res) logger.warning("_broks, exception: %s", exp) res = [] logger.debug("_broks, returns: %s", res) return res @cherrypy.expose @cherrypy.tools.json_out() def _checks(self, do_checks=False, do_actions=False, poller_tags=None, reactionner_tags=None, worker_name='none', module_types=None): """Get checks from scheduler, used by poller or reactionner when they are in active mode (passive = False) This function is not intended for external use. 
Let the poller and reactionner manage all this stuff by themselves ;) :param do_checks: used for poller to get checks :type do_checks: bool :param do_actions: used for reactionner to get actions :type do_actions: bool :param poller_tags: poller tags to filter on this poller :type poller_tags: list :param reactionner_tags: reactionner tags to filter on this reactionner :type reactionner_tags: list :param worker_name: Worker name asking (so that the scheduler add it to actions objects) :type worker_name: str :param module_types: Module type to filter actions/checks :type module_types: list :return: serialized check/action list :rtype: str """ if poller_tags is None: poller_tags = ['None'] if reactionner_tags is None: reactionner_tags = ['None'] if module_types is None: module_types = ['fork'] do_checks = (do_checks == 'True') do_actions = (do_actions == 'True') res = self.app.sched.get_to_run_checks(do_checks, do_actions, poller_tags, reactionner_tags, worker_name, module_types) return serialize(res, no_json=True) @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() # pylint: disable=arguments-differ def _results(self): """Put results to scheduler, used by poller and reactionner when they are in active mode (passive = False) This function is not intended for external use. 
Let the poller and reactionner manage all this stuff by themselves ;) Note: This function name should be changed because of a conflicting with the base class function :param from: poller/reactionner identification :type from: str :param results: list of actions results :type results: list :return: True :rtype: bool """ logger.debug("Put results to the scheduler") res = cherrypy.request.json results = unserialize(res['results'], no_json=True) if not results: logger.debug("-> no results") return True logger.debug("Got %d results from %s", len(results), res['from']) for result in results: logger.debug("-> result: %s", result) # Append to the scheduler result queue self.app.sched.waiting_results.put(result) return True @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _run_external_commands(self): """Post external_commands to scheduler (from arbiter) Wrapper to to app.sched.run_external_commands method :return: None """ commands = cherrypy.request.json with self.app.lock: self.app.sched.run_external_commands(commands['cmds']) def _get_objects(self, o_type): """Get an object list from the scheduler Returns None if the required object type (`o_type`) is not known or an exception is raised. Else returns the objects list :param o_type: searched object type :type o_type: str :return: objects list :rtype: alignak.objects.item.Items """ if o_type not in [t for t in self.app.sched.pushed_conf.types_creations]: return None try: _, _, strclss, _, _ = self.app.sched.pushed_conf.types_creations[o_type] o_list = getattr(self.app.sched, strclss) except Exception: # pylint: disable=broad-except return None return o_list def _get_object(self, o_type, o_name=None): """Get an object from the scheduler Returns None if the required object type (`o_type`) is not known. Else returns the serialized object if found. The object is searched first with o_name as its name and then with o_name as its uuid. 
:param o_type: searched object type :type o_type: str :param name: searched object name :type name: str :return: serialized object :rtype: str """ try: o_found = None o_list = self._get_objects(o_type) if o_list: if o_name is None: return serialize(o_list, no_json=True) if o_list else None # We expected a name... o_found = o_list.find_by_name(o_name) if not o_found: # ... but perhaps we got an object uuid o_found = o_list[o_name] except Exception: # pylint: disable=broad-except return None return serialize(o_found, no_json=True) if o_found else None
class SchedulerInterface(GenericInterface):
    """This module provide a specific HTTP interface for a Scheduler daemon."""

    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def object(self, o_type, o_name=None):
        """Get an object from the scheduler.

        The result is a serialized object which is a Json structure containing:
        - content: the serialized object content
        - __sys_python_module__: the python class of the returned object

        The Alignak unserialize function of the alignak.misc.serialization package
        allows to restore the initial object.

        .. code-block:: python

            from alignak.misc.serialization import unserialize
            from alignak.objects.hostgroup import Hostgroup
            raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts")
            print("Got: %s / %s" % (raw_data.status_code, raw_data.content))
            assert raw_data.status_code == 200
            object = raw_data.json()
            group = unserialize(object, True)
            assert group.__class__ == Hostgroup
            assert group.get_name() == 'allhosts'

        As an example:
        {
            "__sys_python_module__": "alignak.objects.hostgroup.Hostgroup",
            "content": {
                "uuid": "32248642-97dd-4f39-aaa2-5120112a765d",
                "name": "",
                "hostgroup_name": "allhosts",
                "alias": "All Hosts",
                "notes": "",
                "register": true,
                "realm": "",
                "hostgroup_members": [],
                "members": [
                    "553d47bc-27aa-426c-a664-49c4c0c4a249",
                    "f88093ca-e61b-43ff-a41e-613f7ad2cea2"
                ]
            }
        }

        :param o_type: searched object type
        :type o_type: str
        :param o_name: searched object name (or uuid)
        :type o_name: str
        :return: serialized object information
        :rtype: str
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def dump(self, o_name=None, details=False, raw=False):
        """Dump an host (all hosts) from the scheduler.

        This gets the main host information from the scheduler. If details is set, then some
        more information are provided. This will not get all the host known attributes but only
        a reduced set that will inform about the host and its services status

        If raw is set the information are provided in two string lists formated as CSV strings.
        The first list element contains the hosts information and the second one contains the
        services information.

        If an host name is provided, this function will get only this host information, else
        all the scheduler hosts are returned.

        As an example (raw format):
        [
            [
                # Host information
                "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output",
                "BR_host;host;BR_host;1532451511;0;UP;HARD;False;False;Host assumed to be UP"
            ],
            [
                # Services information
                "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output",
                "BR_host;service;dummy_critical;1532451490;2;CRITICAL;SOFT;False;False;
                BR_host-dummy_critical-2",
                "BR_host;service;dummy_ok;1532451492;0;OK;HARD;False;False;BR_host"
            ]
        ]

        As an example (json format):
        {
            is_impact: false,
            name: "BR_host",
            state: "UP",
            last_check: 1532451811,
            state_type: "HARD",
            host: "BR_host",
            output: "Host assumed to be UP",
            services: [
                {
                    is_impact: false,
                    name: "dummy_critical",
                    state: "CRITICAL",
                    last_check: 1532451790,
                    state_type: "HARD",
                    host: "BR_host",
                    output: "BR_host-dummy_critical-2",
                    state_id: 2,
                    type: "service",
                    is_problem: true
                },
                ....
            ],
            state_id: 0,
            type: "host",
            is_problem: false
        }

        :param o_name: searched host name (or uuid)
        :type o_name: str
        :param details: less or more details
        :type details: bool
        :param raw: json or raw text format
        :type raw: bool
        :return: list of host and services information
        :rtype: list
        """
        pass

    # NOTE(review): this helper had no `self` parameter; it never accesses instance
    # state, so it is declared as a static method to make the signature callable.
    @staticmethod
    def get_host_info(host, services, details=False, raw=False):
        """Get the host information

        :param host: concerned host object
        :param services: the host services
        :param details: less or more details
        :type details: bool
        :param raw: json or raw text format
        :type raw: bool
        :return: None
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def monitoring_problems(self):
        """Get Alignak scheduler monitoring status

        Returns an object with the scheduler livesynthesis
        and the known problems

        :return: scheduler live synthesis
        :rtype: dict
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def _wait_new_conf(self):
        """Ask the scheduler to drop its configuration and wait for a new one.

        This overrides the default method from GenericInterface

        :return: None
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def _initial_broks(self, broker_name):
        """Get initial_broks from the scheduler

        This is used by the brokers to prepare the initial status broks

        This do not send broks, it only makes scheduler internal processing.
        Then the broker must use the *_broks* API to get all the stuff

        :param broker_name: broker name, used to filter broks
        :type broker_name: str
        :return: None
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def _broks(self, broker_name):
        """Get the broks from a scheduler, used by brokers

        This is used by the brokers to get the broks list of a scheduler

        :param broker_name: broker name, used to filter broks
        :type broker_name: str
        :return: serialized brok list
        :rtype: dict
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def _checks(self, do_checks=False, do_actions=False, poller_tags=None,
                reactionner_tags=None, worker_name='none', module_types=None):
        """Get checks from scheduler, used by poller or reactionner when they are
        in active mode (passive = False)

        This function is not intended for external use. Let the poller and reactionner
        manage all this stuff by themselves ;)

        :param do_checks: used for poller to get checks
        :type do_checks: bool
        :param do_actions: used for reactionner to get actions
        :type do_actions: bool
        :param poller_tags: poller tags to filter on this poller
        :type poller_tags: list
        :param reactionner_tags: reactionner tags to filter on this reactionner
        :type reactionner_tags: list
        :param worker_name: Worker name asking (so that the scheduler add it to actions objects)
        :type worker_name: str
        :param module_types: Module type to filter actions/checks
        :type module_types: list
        :return: serialized check/action list
        :rtype: str
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def _results(self):
        """Put results to scheduler, used by poller and reactionner when they are
        in active mode (passive = False)

        This function is not intended for external use. Let the poller and reactionner
        manage all this stuff by themselves ;)

        Note: This function name should be changed because of a conflicting with the
        base class function

        :param from: poller/reactionner identification
        :type from: str
        :param results: list of actions results
        :type results: list
        :return: True
        :rtype: bool
        """
        pass

    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def _run_external_commands(self):
        """Post external_commands to scheduler (from arbiter)

        Wrapper to to app.sched.run_external_commands method

        :return: None
        """
        pass

    def _get_objects(self, o_type):
        """Get an object list from the scheduler

        Returns None if the required object type (`o_type`) is not known or an exception
        is raised. Else returns the objects list

        :param o_type: searched object type
        :type o_type: str
        :return: objects list
        :rtype: alignak.objects.item.Items
        """
        pass

    # NOTE(review): this method was a second definition named `_get_objects`, which
    # silently shadowed the list-returning method above. Its docstring describes a
    # single-object lookup by name/uuid, so it is renamed `_get_object` and given the
    # `o_name` parameter the docstring documents.
    def _get_object(self, o_type, o_name=None):
        """Get an object from the scheduler

        Returns None if the required object type (`o_type`) is not known.
        Else returns the serialized object if found. The object is searched first with
        o_name as its name and then with o_name as its uuid.

        :param o_type: searched object type
        :type o_type: str
        :param o_name: searched object name (or uuid)
        :type o_name: str
        :return: serialized object
        :rtype: str
        """
        pass
35
13
45
5
20
20
5
1.21
1
7
0
0
11
0
11
28
528
70
209
54
173
252
162
42
149
15
2
4
57
3,910
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/alignak/objects/satellitelink.py
alignak.objects.satellitelink.SatelliteLink
class SatelliteLink(Item):  # pylint: disable=too-many-instance-attributes
    """SatelliteLink is a common Class for links between
    Arbiter and other satellites. Used by the Dispatcher object.

    It wraps the HTTP connection to a remote Alignak daemon (scheduler, poller,
    broker, reactionner, receiver, arbiter) and tracks its liveness state.
    """
    # Next value used for auto generated instance_id
    _next_id = 1

    # All the class properties that are 'to_send' are stored in the 'global'
    # configuration to be pushed to the satellite when the configuration is dispatched
    properties = Item.properties.copy()
    properties.update({
        'instance_id':
            StringProp(to_send=True),

        # When this property is set, the Arbiter will launch the corresponding daemon
        'alignak_launched':
            BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True),
        # This property is set by the Arbiter when it detects that this daemon
        # is needed but not declared in the configuration
        'missing_daemon':
            BoolProp(default=False, fill_brok=[FULL_STATUS]),

        # Sent to the satellites and used to check the managed configuration
        # Those are not to_send=True because they are updated by the configuration Dispatcher
        # and set when the daemon receives its configuration
        'managed_conf_id':
            StringProp(default=u''),
        'push_flavor':
            StringProp(default=u''),
        'hash':
            StringProp(default=u''),

        # A satellite link has the type/name of the daemon it is related to
        'type':
            StringProp(default=u'', fill_brok=[FULL_STATUS], to_send=True),
        'name':
            StringProp(default=u'', fill_brok=[FULL_STATUS], to_send=True),

        # Listening interface and address used by the other daemons
        'host':
            StringProp(default=u'0.0.0.0', to_send=True),
        'address':
            StringProp(default=u'127.0.0.1', fill_brok=[FULL_STATUS], to_send=True),
        'active':
            BoolProp(default=True, fill_brok=[FULL_STATUS], to_send=True),
        'short_timeout':
            IntegerProp(default=3, fill_brok=[FULL_STATUS], to_send=True),
        'long_timeout':
            IntegerProp(default=120, fill_brok=[FULL_STATUS], to_send=True),

        # the delay (seconds) between two ping retries
        'ping_period':
            IntegerProp(default=5),

        # The maximum number of retries before setting the daemon as dead
        'max_check_attempts':
            IntegerProp(default=3, fill_brok=[FULL_STATUS]),

        # For a spare daemon link
        'spare':
            BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True),
        'spare_check_interval':
            IntegerProp(default=5, fill_brok=[FULL_STATUS]),
        'spare_max_check_attempts':
            IntegerProp(default=3, fill_brok=[FULL_STATUS]),

        'manage_sub_realms':
            BoolProp(default=True, fill_brok=[FULL_STATUS], to_send=True),
        'manage_arbiters':
            BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True),
        'modules':
            ListProp(default=[''], split_on_comma=True),
        'polling_interval':
            IntegerProp(default=5, fill_brok=[FULL_STATUS], to_send=True),
        'use_timezone':
            StringProp(default=u'NOTSET', to_send=True),
        'realm':
            StringProp(default=u'', fill_brok=[FULL_STATUS],
                       brok_transformation=get_obj_name_two_args_and_void),
        'realm_name':
            StringProp(default=u''),
        'satellite_map':
            DictProp(default={}, elts_prop=AddrProp, to_send=True, override=True),
        'use_ssl':
            BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True),
        'hard_ssl_name_check':
            BoolProp(default=True, fill_brok=[FULL_STATUS], to_send=True),
        'passive':
            BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True),
    })

    running_properties = Item.running_properties.copy()
    running_properties.update({
        'con':
            StringProp(default=None),
        'uri':
            StringProp(default=None),

        'reachable':  # Can be reached - default is False until a connection is confirmed
            BoolProp(default=False, fill_brok=[FULL_STATUS]),
        'alive':  # Is alive (attached process is launched...)
            BoolProp(default=False, fill_brok=[FULL_STATUS]),
        'valid':  # Is valid (the daemon is the expected one)
            BoolProp(default=False, fill_brok=[FULL_STATUS]),
        'need_conf':  # The daemon needs to receive a configuration
            BoolProp(default=True, fill_brok=[FULL_STATUS]),
        'have_conf':  # The daemon has received a configuration
            BoolProp(default=False, fill_brok=[FULL_STATUS]),
        'stopping':  # The daemon is requested to stop
            BoolProp(default=False, fill_brok=[FULL_STATUS]),
        'running_id':  # The running identifier of my related daemon
            FloatProp(default=0, fill_brok=[FULL_STATUS]),

        # the number of poll attempt from the arbiter dispatcher
        'attempt':
            IntegerProp(default=0, fill_brok=[FULL_STATUS]),

        # the last connection attempt timestamp
        'last_connection':
            IntegerProp(default=0, fill_brok=[FULL_STATUS]),

        # the number of failed attempt for the connection
        'connection_attempt':
            IntegerProp(default=0, fill_brok=[FULL_STATUS]),

        'last_check':
            IntegerProp(default=0, fill_brok=[FULL_STATUS]),
        'cfg_managed':
            DictProp(default=None),
        'cfg_to_manage':
            DictProp(default={}),
        'configuration_sent':
            BoolProp(default=False),
        'statistics':
            DictProp(default={}),
    })

    def __init__(self, params, parsing=True):
        """Initialize a SatelliteLink

        If parsing is True, we are initializing from a configuration, else we are initializing
        from a copy of another satellite link data. This is used when the daemons receive their
        configuration from the arbiter.

        When initializing from an arbiter configuration, an instance_id property must exist else
        a LinkError exception is raised!

        If a satellite_map property exists in the provided parameters, it will update the default
        existing one
        """
        super(SatelliteLink, self).__init__(params, parsing)

        logger.debug("Initialize a %s, params: %s", self.__class__.__name__, params)

        # My interface context: data accumulated for/from the remote daemon
        self.broks = []
        self.actions = {}
        self.wait_homerun = {}
        self.pushed_commands = []

        self.init_running_properties()

        if parsing:
            # Create a new satellite link identifier
            self.instance_id = u'%s_%d' % (self.__class__.__name__, self.__class__._next_id)
            self.__class__._next_id += 1
        elif 'instance_id' not in params:
            raise LinkError("When not parsing a configuration, "
                            "an instance_id must exist in the provided parameters")

        self.fill_default()

        # Hack for ascending compatibility with Shinken configuration
        try:
            # We received a configuration with a 'name' property...
            if self.name:
                setattr(self, "%s_name" % self.type, self.name)
            else:
                # We received a configuration without a 'name' property... old form!
                if getattr(self, "%s_name" % self.type, None):
                    setattr(self, 'name', getattr(self, "%s_name" % self.type))
                else:
                    self.name = "Unnamed %s" % self.type
                    setattr(self, "%s_name" % self.type, self.name)
        except KeyError:
            setattr(self, 'name', getattr(self, "%s_name" % self.type))

        # Initialize our satellite map, and update if required
        self.set_arbiter_satellite_map(params.get('satellite_map', {}))

        self.cfg = {
            'self_conf': {},
            'schedulers': {},
            'arbiters': {}
        }

        # Create the daemon connection
        self.create_connection()

    def __repr__(self):  # pragma: no cover
        return '<%s - %s/%s, %s//%s:%s, rid: %s, spare: %s, realm: %s, sub-realms: %s, ' \
               'managing: %s (%s) />' \
               % (self.instance_id, self.type, self.name, self.scheme, self.address, self.port,
                  self.running_id, self.spare, self.realm, self.manage_sub_realms,
                  self.managed_conf_id, self.push_flavor)
    __str__ = __repr__

    @property
    def scheme(self):
        """Daemon interface scheme

        :return: http or https if the daemon uses SSL
        :rtype: str
        """
        _scheme = 'http'
        if self.use_ssl:
            _scheme = 'https'
        return _scheme

    @staticmethod
    def get_a_satellite_link(sat_type, sat_dict):
        """Get a SatelliteLink object for a given satellite type and a dictionary

        :param sat_type: type of satellite
        :param sat_dict: satellite configuration data
        :return: a SatelliteLink sub-class instance matching `sat_type`
        """
        cls = get_alignak_class('alignak.objects.%slink.%sLink' % (sat_type,
                                                                   sat_type.capitalize()))
        return cls(params=sat_dict, parsing=False)

    def get_livestate(self):
        """Get the SatelliteLink live state.

        The live state is a tuple information containing a state identifier and a message, where:
            state is:
            - 0 for an up and running satellite
            - 1 if the satellite is not reachable
            - 2 if the satellite is dead
            - 3 else (not active)

        :return: tuple
        """
        livestate = 0
        if self.active:
            if not self.reachable:
                livestate = 1
            elif not self.alive:
                livestate = 2
        else:
            livestate = 3

        livestate_output = "%s/%s is %s" % (self.type, self.name, [
            "up and running.",
            "warning because not reachable.",
            "critical because not responding.",
            "not active by configuration."
        ][livestate])

        return (livestate, livestate_output)

    def set_arbiter_satellite_map(self, satellite_map=None):
        """
            satellite_map is the satellites map in current context:
                - A SatelliteLink is owned by an Arbiter
                - satellite_map attribute of a SatelliteLink is the map defined
                IN THE satellite configuration but for creating connections,
                we need to have the satellites map from the Arbiter point of view

        :return: None
        """
        self.satellite_map = {
            'address': self.address, 'port': self.port,
            'use_ssl': self.use_ssl, 'hard_ssl_name_check': self.hard_ssl_name_check
        }
        if satellite_map:
            self.satellite_map.update(satellite_map)

    def get_and_clear_context(self):
        """Get and clean all of our broks, actions, external commands and homerun

        :return: tuple of (broks, actions, wait_homerun, pushed_commands)
        :rtype: tuple
        """
        res = (self.broks, self.actions, self.wait_homerun, self.pushed_commands)
        self.broks = []
        self.actions = {}
        self.wait_homerun = {}
        self.pushed_commands = []
        return res

    def get_and_clear_broks(self):
        """Get and clean all of our broks

        :return: list of all broks of the satellite link
        :rtype: list
        """
        res = self.broks
        self.broks = []
        return res

    def prepare_for_conf(self):
        """Initialize the pushed configuration dictionary with the inner
        properties that are to be propagated to the satellite link.

        :return: None
        """
        logger.debug("- preparing: %s", self)
        self.cfg = {
            'self_conf': self.give_satellite_cfg(),
            'schedulers': {},
            'arbiters': {}
        }
        logger.debug("- prepared: %s", self.cfg)

    def give_satellite_cfg(self):
        """Get the default information for a satellite.

        Overridden by the specific satellites links

        :return: dictionary of information common to all the links
        :rtype: dict
        """
        # All the satellite link class properties that are 'to_send' are stored in a
        # dictionary to be pushed to the satellite when the configuration is dispatched
        res = {}
        properties = self.__class__.properties
        for prop, entry in list(properties.items()):
            if hasattr(self, prop) and entry.to_send:
                res[prop] = getattr(self, prop)
        return res

    def give_satellite_json(self):
        """Get the json information for a satellite.

        This to provide information that will be exposed by a daemon on its HTTP interface.

        :return: dictionary of information common to all the links
        :rtype: dict
        """
        daemon_properties = ['type', 'name', 'uri', 'spare', 'configuration_sent',
                             'realm_name', 'manage_sub_realms',
                             'active', 'reachable', 'alive', 'passive',
                             'last_check', 'polling_interval', 'max_check_attempts']

        (livestate, livestate_output) = self.get_livestate()
        res = {
            "livestate": livestate,
            "livestate_output": livestate_output
        }
        for sat_prop in daemon_properties:
            res[sat_prop] = getattr(self, sat_prop, 'not_yet_defined')
        return res

    def manages(self, cfg_part):
        """Tell if the satellite is managing this configuration part

        The managed configuration is formed as a dictionary indexed on the link instance_id:
        {
            u'SchedulerLink_1': {
                u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
                u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320',
                u'managed_conf_id': [u'Config_1']
            }
        }

        Note that the managed configuration is a string array rather than a simple string...
        no special for this reason, probably due to the serialization when the configuration
        is pushed :/

        :param cfg_part: configuration part as prepare by the Dispatcher
        :type cfg_part: Conf
        :return: True if the satellite manages this configuration
        :rtype: bool
        """
        logger.debug("Do I (%s/%s) manage: %s, my managed configuration(s): %s",
                     self.type, self.name, cfg_part, self.cfg_managed)

        # If we do not yet manage a configuration
        if not self.cfg_managed:
            logger.info("I (%s/%s) do not manage (yet) any configuration!", self.type, self.name)
            return False

        # Check in the schedulers list configurations
        for managed_cfg in list(self.cfg_managed.values()):
            # If not even the cfg_id in the managed_conf, bail out
            if managed_cfg['managed_conf_id'] == cfg_part.instance_id \
                    and managed_cfg['push_flavor'] == cfg_part.push_flavor:
                logger.debug("I do manage this configuration: %s", cfg_part)
                break
        else:
            # for/else: no matching configuration was found in the loop
            logger.warning("I (%s/%s) do not manage this configuration: %s",
                           self.type, self.name, cfg_part)
            return False

        return True

    def create_connection(self):
        """Initialize HTTP connection with a satellite (con attribute) and
        set its uri attribute

        This is called on the satellite link initialization

        :return: None
        """
        # Create the HTTP client for the connection
        try:
            self.con = HTTPClient(address=self.satellite_map['address'],
                                  port=self.satellite_map['port'],
                                  short_timeout=self.short_timeout, long_timeout=self.long_timeout,
                                  use_ssl=self.satellite_map['use_ssl'],
                                  strong_ssl=self.satellite_map['hard_ssl_name_check'])
            self.uri = self.con.uri
        except HTTPClientException as exp:
            # Set the satellite as dead
            self.set_dead()
            raise LinkError("Error with '%s' when creating client: %s" % (self.name, str(exp)))

    def set_alive(self):
        """Set alive, reachable, and reset attempts.
        If we change state, raise a status brok update

        alive, means the daemon is present in the system
        reachable, means that the HTTP connection is valid

        With this function we confirm that the daemon is reachable and, thus, we assume it
        is alive!

        :return: None
        """
        was_alive = self.alive
        self.alive = True
        self.reachable = True
        self.attempt = 0

        # We came from dead to alive! We must propagate the good news
        if not was_alive:
            logger.info("Setting %s satellite as alive :)", self.name)
            self.broks.append(self.get_update_status_brok())

    def set_dead(self):
        """Set the satellite into dead state:
        If we change state, raise a status brok update

        :return:None
        """
        was_alive = self.alive
        self.alive = False
        self.reachable = False
        self.attempt = 0
        # We will have to create a new connection...
        self.con = None

        # We are dead now! We must propagate the sad news...
        if was_alive and not self.stopping:
            logger.warning("Setting the satellite %s as dead :(", self.name)
            self.broks.append(self.get_update_status_brok())

    def add_failed_check_attempt(self, reason=''):
        """Set the daemon as unreachable and add a failed attempt
        if we reach the maximum attempts, set the daemon as dead

        :param reason: the reason of adding an attempts (stack trace sometimes)
        :type reason: str
        :return: None
        """
        self.reachable = False
        self.attempt = self.attempt + 1

        logger.debug("Failed attempt for %s (%d/%d), reason: %s",
                     self.name, self.attempt, self.max_check_attempts, reason)
        # Don't need to warn again and again if the satellite is already dead
        # Only warn when it is alive
        if self.alive:
            if not self.stopping:
                logger.warning("Add failed attempt for %s (%d/%d) - %s",
                               self.name, self.attempt, self.max_check_attempts, reason)
            else:
                logger.info("Stopping... failed attempt for %s (%d/%d) - also probably stopping",
                            self.name, self.attempt, self.max_check_attempts)

        # If we reached the maximum attempts, set the daemon as dead
        if self.attempt >= self.max_check_attempts:
            if not self.stopping:
                logger.warning("Set %s as dead, too much failed attempts (%d), last problem is: %s",
                               self.name, self.max_check_attempts, reason)
            else:
                logger.info("Stopping... set %s as dead, too much failed attempts (%d)",
                            self.name, self.max_check_attempts)

            self.set_dead()

    def valid_connection(*outer_args, **outer_kwargs):
        # pylint: disable=unused-argument, no-method-argument
        """Check if the daemon connection is established and valid

        Decorator factory: the decorated method raises a LinkError when the
        connection is not created or not yet initialized (no running_id).
        """
        def decorator(func):  # pylint: disable=missing-docstring
            def decorated(*args, **kwargs):  # pylint: disable=missing-docstring
                # outer_args and outer_kwargs are the decorator arguments
                # args and kwargs are the decorated function arguments
                link = args[0]
                if not link.con:
                    raise LinkError("The connection is not created for %s" % link.name)
                if not link.running_id:
                    raise LinkError("The connection is not initialized for %s" % link.name)

                return func(*args, **kwargs)
            return decorated
        return decorator

    def communicate(*outer_args, **outer_kwargs):
        # pylint: disable=unused-argument, no-method-argument
        """Check if the daemon connection is authorized and valid

        Decorator factory: wraps the HTTP calls with the link liveness checks
        and the connection error handling (timeouts, data errors...).
        """
        def decorator(func):  # pylint: disable=missing-docstring
            def decorated(*args, **kwargs):  # pylint: disable=missing-docstring
                # outer_args and outer_kwargs are the decorator arguments
                # args and kwargs are the decorated function arguments
                fn_name = func.__name__
                link = args[0]
                if not link.alive:
                    logger.warning("%s is not alive for %s", link.name, fn_name)
                    return None

                try:
                    if not link.reachable:
                        raise LinkError("The %s %s is not reachable, %s"
                                        % (link.type, link.name, fn_name))

                    logger.debug("[%s] Calling: %s, %s, %s", link.name, fn_name, args, kwargs)
                    return func(*args, **kwargs)
                except HTTPClientConnectionException as exp:
                    # A Connection error is raised when the daemon connection cannot be established
                    # No way with the configuration parameters!
                    if not link.stopping:
                        logger.warning("A daemon (%s/%s) that we must be related with "
                                       "cannot be connected: %s", link.type, link.name, exp)
                    else:
                        logger.info("Stopping... daemon (%s/%s) cannot be connected. "
                                    "It is also probably stopping or yet stopped.",
                                    link.type, link.name)
                    link.set_dead()
                except (LinkError, HTTPClientTimeoutException) as exp:
                    link.add_failed_check_attempt("Connection timeout "
                                                  "with '%s': %s" % (fn_name, str(exp)))
                    return False
                except HTTPClientDataException as exp:
                    # A Data error is raised when the daemon HTTP response is not 200!
                    # No way with the communication if some problems exist in the daemon interface!
                    # Abort all
                    err = "Some daemons that we must be related with " \
                          "have some interface problems. Sorry, I bail out! Problems are: %s" \
                          % str(exp)
                    logger.error(err)
                    os.sys.exit(err)
                except HTTPClientException as exp:
                    link.add_failed_check_attempt("Error with '%s': %s" % (fn_name, str(exp)))

                return None
            return decorated
        return decorator

    @communicate()
    def get_running_id(self):
        """Send a HTTP request to the satellite (GET /identity)
        Used to get the daemon running identifier that allows to know if the daemon got restarted

        This is called on connection initialization or re-connection

        NOTE: this method is deliberately not decorated with @valid_connection() because
        it is precisely the call that initializes running_id.

        If the daemon is not reachable, this function will raise an exception and the caller
        will receive a False as return

        :return: Boolean indicating if the running id was received
        :type: bool
        """
        former_running_id = self.running_id

        logger.info(" get the running identifier for %s %s.", self.type, self.name)
        # An exception is raised in this function if the daemon is not reachable
        self.running_id = self.con.get('identity')
        if isinstance(self.running_id, dict):
            self.running_id = self.running_id['running_id']

        if former_running_id == 0:
            if self.running_id:
                logger.info(" -> got: %s.", self.running_id)
                former_running_id = self.running_id

        # If the daemon has just started or has been restarted: it has a new running_id.
        if former_running_id != self.running_id:
            if former_running_id:
                logger.info(" -> The %s %s running identifier changed: %s. "
                            "The daemon was certainly restarted!",
                            self.type, self.name, self.running_id)
            # So we clear all verifications, they are obsolete now.
            logger.info("The running id of the %s %s changed (%s), "
                        "we must clear its context.",
                        self.type, self.name, self.running_id)
            (_, _, _, _) = self.get_and_clear_context()

        # Set the daemon as alive
        self.set_alive()

        return True

    @valid_connection()
    @communicate()
    def stop_request(self, stop_now=False):
        """Send a stop request to the daemon

        :param stop_now: stop now or go to stop wait mode
        :type stop_now: bool
        :return: the daemon response (True)
        """
        logger.debug("Sending stop request to %s, stop now: %s", self.name, stop_now)

        res = self.con.get('stop_request', {'stop_now': '1' if stop_now else '0'})
        return res

    @valid_connection()
    @communicate()
    def update_infos(self, forced=False, test=False):
        """Update satellite info each self.polling_interval seconds
        so we smooth arbiter actions for just useful actions.

        Raise a satellite update status Brok

        If forced is True, then ignore the ping period. This is used when the configuration
        has not yet been dispatched to the Arbiter satellites.

        If test is True, do not really ping the daemon (useful for the unit tests only)

        :param forced: ignore the ping smoothing
        :type forced: bool
        :param test:
        :type test: bool
        :return:
        None if the last request is too recent,
        False if a timeout was raised during the request,
        else the managed configurations dictionary
        """
        logger.debug("Update informations, forced: %s", forced)

        # First look if it's not too early to ping
        now = time.time()
        if not forced and self.last_check and self.last_check + self.polling_interval > now:
            logger.debug("Too early to ping %s, ping period is %ds!, last check: %d, now: %d",
                         self.name, self.polling_interval, self.last_check, now)
            return None

        self.get_conf(test=test)

        # Update the daemon last check timestamp
        self.last_check = time.time()

        # Update the state of this element
        self.broks.append(self.get_update_status_brok())

        return self.cfg_managed

    @valid_connection()
    @communicate()
    def get_daemon_stats(self, details=False):
        """Send a HTTP request to the satellite (GET /stats)

        :return: Daemon statistics
        :rtype: dict
        """
        logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable)
        return self.con.get('stats%s' % ('?details=1' if details else ''))

    @valid_connection()
    @communicate()
    def get_initial_broks(self, broker_name):
        """Send a HTTP request to the satellite (GET /_initial_broks)

        Used to build the initial broks for a broker connecting to a scheduler

        :param broker_name: the concerned broker name
        :type broker_name: str
        :return: the daemon response to the _initial_broks request
        """
        logger.debug("Getting initial broks for %s, %s %s", self.name, self.alive, self.reachable)
        return self.con.get('_initial_broks', {'broker_name': broker_name}, wait=True)

    @valid_connection()
    @communicate()
    def wait_new_conf(self):
        """Send a HTTP request to the satellite (GET /_wait_new_conf)

        :return: True if wait new conf, otherwise False
        :rtype: bool
        """
        logger.debug("Wait new configuration for %s, %s %s", self.name, self.alive, self.reachable)
        return self.con.get('_wait_new_conf')

    @valid_connection()
    @communicate()
    def put_conf(self, configuration, test=False):
        """Send the configuration to the satellite
        HTTP request to the satellite (POST /push_configuration)

        If test is True, store the configuration internally

        :param configuration: The conf to send (data depend on the satellite)
        :type configuration:
        :return: None
        """
        logger.debug("Sending configuration to %s, %s %s", self.name, self.alive, self.reachable)
        # ----------
        if test:
            setattr(self, 'unit_test_pushed_configuration', configuration)
            return True
        # ----------

        return self.con.post('_push_configuration', {'conf': configuration}, wait=True)

    @valid_connection()
    @communicate()
    def has_a_conf(self, magic_hash=None):  # pragma: no cover
        """Send a HTTP request to the satellite (GET /_have_conf)
        Used to know if the satellite has a conf

        :param magic_hash: Config hash. Only used for HA arbiter communication
        :type magic_hash: int
        :return: Boolean indicating if the satellite has a (specific) configuration
        :type: bool
        """
        logger.debug("Have a configuration for %s, %s %s", self.name, self.alive, self.reachable)
        self.have_conf = self.con.get('_have_conf', {'magic_hash': magic_hash})
        return self.have_conf

    @valid_connection()
    @communicate()
    def get_conf(self, test=False):
        """Send a HTTP request to the satellite (GET /managed_configurations)
        and update the cfg_managed attribute with the new information
        Set to {} on failure

        the managed configurations are a dictionary which keys are the scheduler
        link instance id and the values are the push_flavor

        If test is True, returns the unit test internally stored configuration

        Returns False if a timeout is raised

        :return: see @communicate, or the managed configuration
        """
        logger.debug("Get managed configuration for %s, %s %s",
                     self.name, self.alive, self.reachable)
        # ----------
        if test:
            self.cfg_managed = {}
            self.have_conf = True
            logger.debug("Get managed configuration test ...")
            if getattr(self, 'unit_test_pushed_configuration', None) is not None:
                # Note this is a dict not a SatelliteLink object !
                for scheduler_link in self.unit_test_pushed_configuration['schedulers'].values():
                    self.cfg_managed[scheduler_link['instance_id']] = {
                        'hash': scheduler_link['hash'],
                        'push_flavor': scheduler_link['push_flavor'],
                        'managed_conf_id': scheduler_link['managed_conf_id']
                    }
        # ----------
        else:
            self.cfg_managed = self.con.get('managed_configurations')
            logger.debug("My (%s) fresh managed configuration: %s", self.name, self.cfg_managed)

        self.have_conf = (self.cfg_managed != {})

        return self.cfg_managed

    @valid_connection()
    @communicate()
    def push_broks(self, broks):
        """Send a HTTP request to the satellite (POST /push_broks)
        Send broks to the satellite

        :param broks: Brok list to send
        :type broks: list
        :return: True on success, False on failure
        :rtype: bool
        """
        logger.debug("[%s] Pushing %d broks", self.name, len(broks))
        return self.con.post('_push_broks', {'broks': broks}, wait=True)

    @valid_connection()
    @communicate()
    def push_actions(self, actions, scheduler_instance_id):
        """Post the actions to execute to the satellite.
        Indeed, a scheduler post its checks to a poller and its actions to a reactionner.

        :param actions: Action list to send
        :type actions: list
        :param scheduler_instance_id: Scheduler instance identifier
        :type scheduler_instance_id: uuid
        :return: True on success, False on failure
        :rtype: bool
        """
        logger.debug("Pushing %d actions from %s", len(actions), scheduler_instance_id)
        for action in actions:
            logger.debug("- %s", action)
        res = serialize(actions, no_json=True)
        return self.con.post('_push_actions', {'actions': res,
                                               'scheduler_instance_id': scheduler_instance_id},
                             wait=True)

    @valid_connection()
    @communicate()
    def push_results(self, results, scheduler_name):
        """Send a HTTP request to the satellite (POST /_results)
        Send actions results to the satellite

        :param results: Results list to send
        :type results: list
        :param scheduler_name: Scheduler name
        :type scheduler_name: uuid
        :return: True on success, False on failure
        :rtype: bool
        """
        logger.debug("Pushing %d results to %s", len(results), scheduler_name)
        res = serialize(results, no_json=True)
        result = self.con.post('_results', {'results': res, 'from': scheduler_name}, wait=True)
        return result

    @valid_connection()
    @communicate()
    def push_external_commands(self, commands):
        """Send a HTTP request to the satellite (POST /_run_external_commands)
        to send the external commands to the satellite

        :param commands: external commands list to send
        :type commands: list
        :return: True on success, False on failure
        :rtype: bool
        """
        logger.debug("Pushing %d external commands", len(commands))
        return self.con.post('_run_external_commands', {'cmds': commands}, wait=True)

    @valid_connection()
    @communicate()
    def get_external_commands(self):
        """Send a HTTP request to the satellite (GET /_external_commands) to
        get the external commands from the satellite.

        :return: External Command list on success, [] on failure
        :rtype: list
        """
        res = self.con.get('_external_commands', wait=False)
        logger.debug("Got %d external commands from %s: %s", len(res), self.name, res)
        return unserialize(res, True)

    @valid_connection()
    @communicate()
    def get_broks(self, broker_name):
        """Send a HTTP request to the satellite (GET /_broks)
        Get broks from the satellite.
        Un-serialize data received.

        :param broker_name: the concerned broker link
        :type broker_name: BrokerLink
        :return: Broks list on success, [] on failure
        :rtype: list
        """
        res = self.con.get('_broks', {'broker_name': broker_name}, wait=False)
        logger.debug("Got broks from %s: %s", self.name, res)
        return unserialize(res, True)

    @valid_connection()
    @communicate()
    def get_events(self):
        """Send a HTTP request to the satellite (GET /_events)
        Get monitoring events from the satellite.

        :return: Broks list on success, [] on failure
        :rtype: list
        """
        logger.debug("Getting events from %s", self.name)
        res = self.con.get('_events', wait=False)
        logger.debug("Got events from %s: %s", self.name, res)
        return unserialize(res, True)

    @valid_connection()
    def get_results(self, scheduler_instance_id):
        """Send a HTTP request to the satellite (GET /_results)
        Get actions results from satellite (only passive satellites expose this method.

        :param scheduler_instance_id: scheduler instance identifier
        :type scheduler_instance_id: str
        :return: Results list on success, [] on failure
        :rtype: list
        """
        res = self.con.get('_results', {'scheduler_instance_id': scheduler_instance_id}, wait=True)
        logger.debug("Got %d results from %s: %s", len(res), self.name, res)
        return res

    @valid_connection()
    def get_actions(self, params):
        """Send a HTTP request to the satellite (GET /_checks)
        Get actions from the scheduler.
        Un-serialize the received data.

        :param params: the request parameters
        :type params: str
        :return: Actions list on success, [] on failure
        :rtype: list
        """
        res = self.con.get('_checks', params, wait=True)
        logger.debug("Got checks to execute from %s: %s", self.name, res)
        return unserialize(res, True)
class SatelliteLink(Item): '''SatelliteLink is a common Class for links between Arbiter and other satellites. Used by the Dispatcher object. ''' def __init__(self, params, parsing=True): '''Initialize a SatelliteLink If parsing is True, we are initializing from a configuration, else we are initializing from a copy of another satellite link data. This is used when the daemons receive their configuration from the arbiter. When initializing from an arbiter configuration, an instance_id property must exist else a LinkError exception is raised! If a satellite_map property exists in the provided parameters, it will update the default existing one ''' pass def __repr__(self): pass @property def scheme(self): '''Daemon interface scheme :return: http or https if the daemon uses SSL :rtype: str ''' pass @staticmethod def get_a_satellite_link(sat_type, sat_dict): '''Get a SatelliteLink object for a given satellite type and a dictionary :param sat_type: type of satellite :param sat_dict: satellite configuration data :return: ''' pass def get_livestate(self): '''Get the SatelliteLink live state. 
The live state is a tuple information containing a state identifier and a message, where: state is: - 0 for an up and running satellite - 1 if the satellite is not reachale - 2 if the satellite is dead - 3 else (not active) :return: tuple ''' pass def set_arbiter_satellite_map(self, satellite_map=None): ''' satellite_map is the satellites map in current context: - A SatelliteLink is owned by an Arbiter - satellite_map attribute of a SatelliteLink is the map defined IN THE satellite configuration but for creating connections, we need to have the satellites map from the Arbiter point of view :return: None ''' pass def get_and_clear_context(self): '''Get and clean all of our broks, actions, external commands and homerun :return: list of all broks of the satellite link :rtype: list ''' pass def get_and_clear_broks(self): '''Get and clean all of our broks :return: list of all broks of the satellite link :rtype: list ''' pass def prepare_for_conf(self): '''Initialize the pushed configuration dictionary with the inner properties that are to be propagated to the satellite link. :return: None ''' pass def give_satellite_cfg(self): '''Get the default information for a satellite. Overridden by the specific satellites links :return: dictionary of information common to all the links :rtype: dict ''' pass def give_satellite_json(self): '''Get the json information for a satellite. This to provide information that will be exposed by a daemon on its HTTP interface. :return: dictionary of information common to all the links :rtype: dict ''' pass def manages(self, cfg_part): '''Tell if the satellite is managing this configuration part The managed configuration is formed as a dictionary indexed on the link instance_id: { u'SchedulerLink_1': { u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320', u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320', u'managed_conf_id': [u'Config_1'] } } Note that the managed configuration is a string array rather than a simple string... 
no special for this reason, probably due to the serialization when the configuration is pushed :/ :param cfg_part: configuration part as prepare by the Dispatcher :type cfg_part: Conf :return: True if the satellite manages this configuration :rtype: bool ''' pass def create_connection(self): '''Initialize HTTP connection with a satellite (con attribute) and set its uri attribute This is called on the satellite link initialization :return: None ''' pass def set_alive(self): '''Set alive, reachable, and reset attempts. If we change state, raise a status brok update alive, means the daemon is prenset in the system reachable, means that the HTTP connection is valid With this function we confirm that the daemon is reachable and, thus, we assume it is alive! :return: None ''' pass def set_dead(self): '''Set the satellite into dead state: If we change state, raise a status brok update :return:None ''' pass def add_failed_check_attempt(self, reason=''): '''Set the daemon as unreachable and add a failed attempt if we reach the maximum attempts, set the daemon as dead :param reason: the reason of adding an attempts (stack trace sometimes) :type reason: str :return: None ''' pass def valid_connection(*outer_args, **outer_kwargs): '''Check if the daemon connection is established and valid''' pass def decorator(func): pass def decorated(*args, **kwargs): pass def communicate(*outer_args, **outer_kwargs): '''Check if the daemon connection is authorized and valid''' pass def decorator(func): pass def decorated(*args, **kwargs): pass @communicate() def get_running_id(self): '''Send a HTTP request to the satellite (GET /identity) Used to get the daemon running identifier that allows to know if the daemon got restarted This is called on connection initialization or re-connection If the daemon is notreachable, this function will raise an exception and the caller will receive a False as return :return: Boolean indicating if the running id was received :type: bool ''' pass 
@valid_connection() @communicate() def stop_request(self, stop_now=False): '''Send a stop request to the daemon :param stop_now: stop now or go to stop wait mode :type stop_now: bool :return: the daemon response (True) ''' pass @valid_connection() @communicate() def update_infos(self, forced=False, test=False): '''Update satellite info each self.polling_interval seconds so we smooth arbiter actions for just useful actions. Raise a satellite update status Brok If forced is True, then ignore the ping period. This is used when the configuration has not yet been dispatched to the Arbiter satellites. If test is True, do not really ping the daemon (useful for the unit tests only) :param forced: ignore the ping smoothing :type forced: bool :param test: :type test: bool :return: None if the last request is too recent, False if a timeout was raised during the request, else the managed configurations dictionary ''' pass @valid_connection() @communicate() def get_daemon_stats(self, details=False): '''Send a HTTP request to the satellite (GET /get_daemon_stats) :return: Daemon statistics :rtype: dict ''' pass @valid_connection() @communicate() def get_initial_broks(self, broker_name): '''Send a HTTP request to the satellite (GET /_initial_broks) Used to build the initial broks for a broker connecting to a scheduler :param broker_name: the concerned broker name :type broker_name: str :return: Boolean indicating if the running id changed :type: bool ''' pass @valid_connection() @communicate() def wait_new_conf(self): '''Send a HTTP request to the satellite (GET /wait_new_conf) :return: True if wait new conf, otherwise False :rtype: bool ''' pass @valid_connection() @communicate() def put_conf(self, configuration, test=False): '''Send the configuration to the satellite HTTP request to the satellite (POST /push_configuration) If test is True, store the configuration internally :param configuration: The conf to send (data depend on the satellite) :type configuration: :return: None 
''' pass @valid_connection() @communicate() def has_a_conf(self, magic_hash=None): '''Send a HTTP request to the satellite (GET /have_conf) Used to know if the satellite has a conf :param magic_hash: Config hash. Only used for HA arbiter communication :type magic_hash: int :return: Boolean indicating if the satellite has a (specific) configuration :type: bool ''' pass @valid_connection() @communicate() def get_conf(self, test=False): '''Send a HTTP request to the satellite (GET /managed_configurations) and update the cfg_managed attribute with the new information Set to {} on failure the managed configurations are a dictionary which keys are the scheduler link instance id and the values are the push_flavor If test is True, returns the unit test internally stored configuration Returns False if a timeout is raised :return: see @communicate, or the managed configuration ''' pass @valid_connection() @communicate() def push_broks(self, broks): '''Send a HTTP request to the satellite (POST /push_broks) Send broks to the satellite :param broks: Brok list to send :type broks: list :return: True on success, False on failure :rtype: bool ''' pass @valid_connection() @communicate() def push_actions(self, actions, scheduler_instance_id): '''Post the actions to execute to the satellite. Indeed, a scheduler post its checks to a poller and its actions to a reactionner. 
:param actions: Action list to send :type actions: list :param scheduler_instance_id: Scheduler instance identifier :type scheduler_instance_id: uuid :return: True on success, False on failure :rtype: bool ''' pass @valid_connection() @communicate() def push_results(self, results, scheduler_name): '''Send a HTTP request to the satellite (POST /_results) Send actions results to the satellite :param results: Results list to send :type results: list :param scheduler_name: Scheduler name :type scheduler_name: uuid :return: True on success, False on failure :rtype: bool ''' pass @valid_connection() @communicate() def push_external_commands(self, commands): '''Send a HTTP request to the satellite (POST /r_un_external_commands) to send the external commands to the satellite :param results: Results list to send :type results: list :return: True on success, False on failure :rtype: bool ''' pass @valid_connection() @communicate() def get_external_commands(self): '''Send a HTTP request to the satellite (GET /_external_commands) to get the external commands from the satellite. :return: External Command list on success, [] on failure :rtype: list ''' pass @valid_connection() @communicate() def get_broks(self, broker_name): '''Send a HTTP request to the satellite (GET /_broks) Get broks from the satellite. Un-serialize data received. :param broker_name: the concerned broker link :type broker_name: BrokerLink :return: Broks list on success, [] on failure :rtype: list ''' pass @valid_connection() @communicate() def get_events(self): '''Send a HTTP request to the satellite (GET /_events) Get monitoring events from the satellite. :return: Broks list on success, [] on failure :rtype: list ''' pass @valid_connection() def get_results(self, scheduler_instance_id): '''Send a HTTP request to the satellite (GET /_results) Get actions results from satellite (only passive satellites expose this method. 
:param scheduler_instance_id: scheduler instance identifier :type scheduler_instance_id: str :return: Results list on success, [] on failure :rtype: list ''' pass @valid_connection() def get_actions(self, params): '''Send a HTTP request to the satellite (GET /_checks) Get actions from the scheduler. Un-serialize the received data. :param params: the request parameters :type params: str :return: Actions list on success, [] on failure :rtype: list ''' pass
76
36
20
3
11
8
2
0.65
1
11
6
6
35
19
36
70
917
145
475
119
399
310
276
96
235
8
3
3
85
3,911
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_actions.py
tests.test_actions.TestAction
class TestAction(AlignakTest): def setUp(self): super(TestAction, self).setUp() # Create and test an action object a = Action() assert a.env == {} assert a.timeout == 10 assert a.exit_status == 3 def wait_finished(self, a, size=8192, timeout=20): start = time.time() while True: # Do the job if a.status == 'launched': a.check_finished(size) time.sleep(0.01) if a.status != 'launched': return # 20s timeout if time.time() - start > timeout: print("Timeout: %ss!" % timeout) return def test_action_creation(self): """ Test action object creation / initialization :return: None """ # Create an action without any parameters # Will fill only the default action properties action = Action() for prop in list(action.__class__.properties.keys()): # command has no default value if prop not in ['command']: assert hasattr(action, prop) # # Serialize an action # An action object is not serializable! Should it be? # When a poller/reactionner gets actions, the whole list is serialized # action_serialized = serialize(action) # print(action_serialized) # Create a check without any parameters # Will fill only the default action properties check = Check() for prop in list(check.__class__.properties.keys()): # command has no default value if prop not in ['command']: assert hasattr(check, prop) # # Serialize a check # A check object is not serializable! Should it be? # check_serialized = serialize(check) # print(check_serialized) # Create an event_handler without any parameters # Will fill only the default action properties event_handler = EventHandler() for prop in list(event_handler.__class__.properties.keys()): # command has no default value if prop not in ['command']: assert hasattr(event_handler, prop) # # Serialize an event_handler # An event handler object is not serializable! Should it be? 
# event_handler_serialized = serialize(event_handler) # print(event_handler_serialized) # Create an action with parameters parameters = { 'status': 'planned', 'ref': 'host_uuid', 'ref_type': 'host', 'command': 'my_command.sh', 'check_time': 0, 'last_poll': 0, 'exit_status': 0, 'execution_time': 0.0, 'wait_time': 0.001, 'creation_time': time.time(), 'my_worker': 'test_worker', 'my_scheduler': 'test_scheduler', 'timeout': 100, 't_to_go': 0.0, 'is_a': 'action', 'reactionner_tag': 'tag', 'module_type': 'nrpe-booster', 'env': {}, 'log_actions': True } # Will fill the action properties with the parameters action = Action(parameters) # And it will add an uuid parameters['uuid'] = action.uuid # Those parameters are missing in the provided parameters but they will exist in the object parameters.update({ 'u_time': 0.0, 's_time': 0.0, '_in_timeout': False, '_is_orphan': False, 'type': '', 'output': '', 'long_output': '', 'perf_data': '', 'internal': False }) # creation_time and log_actions will not be modified! 
They are set # only if they do not yet exist assert action.__dict__ == parameters # Create a check with parameters parameters = { 'check_time': 0, 'creation_time': 1481616993.195676, 'ref': 'an_host', 'ref_type': 'host', 'command': 'my_command.sh', 'depend_on': [], 'depend_on_me': [], 'dependency_check': False, 'env': {}, 'execution_time': 0.0, 'from_trigger': False, 'is_a': 'check', 'log_actions': False, 'module_type': 'fork', 's_time': 0.0, 't_to_go': 0.0, 'timeout': 10, 'type': '', 'u_time': 0.0, 'my_worker': 'test_worker', 'my_scheduler': 'test_scheduler', } # Will fill the action properties with the parameters # The missing parameters will be set with their default value check = Check(parameters) # And it will add an uuid parameters['uuid'] = check.uuid # Those parameters are missing in the provided parameters but they will exist in the object parameters.update({ '_in_timeout': False, '_is_orphan': False, 'exit_status': 3, 'internal': False, 'output': '', 'long_output': '', 'perf_data': '', 'passive_check': False, 'freshness_expiry_check': False, 'poller_tag': 'None', 'reactionner_tag': 'None', 'state': 0, 'status': 'scheduled', 'last_poll': 0, 'wait_time': 0.001 }) assert check.__dict__ == parameters def test_action(self): """ Test simple action execution :return: None """ a = Action() a.command = os.path.join(self._test_dir, "libexec/dummy_command.sh") assert a.got_shell_characters() == False # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end self.wait_finished(a) assert 3 == a.exit_status assert 'done' == a.status assert "Hi, I'm for testing only. 
Please do not use me directly, really" == a.output assert "" == a.long_output assert "Hip=99% Hop=34mm" == a.perf_data def test_action_timeout(self): """ Test simple action execution - fail on timeout :return: None """ # Normal esxecution # ----------------- a = Action() # Expect no more than 30 seconds execution time a.timeout = 30 # Action is sleeping for 10 seconds a.command = os.path.join(self._test_dir, "libexec/sleep_command.sh 10") # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end, not more than 5 secondes self.wait_finished(a, timeout=30) assert 0 == a.exit_status assert 'done' == a.status assert "I start sleeping for 10 seconds..." == a.output assert "I awoke after sleeping 10 seconds" == a.long_output assert "sleep=10" == a.perf_data # Too long esxecution # ------------------- a = Action() # Expect no more than 5 seconds execution time a.timeout = 5 # Action is sleeping for 10 seconds a.command = os.path.join(self._test_dir, "libexec/sleep_command.sh 10") # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end, not more than 5 secondes self.wait_finished(a, timeout=10) assert 3 == a.exit_status assert 'timeout' == a.status assert "I start sleeping for 10 seconds..." 
== a.output assert "" == a.long_output assert "" == a.perf_data def test_echo_environment_variables(self): """ Test echo environment variables :return: None """ a = Action() a.command = "echo $ALIGNAK_TEST_VARIABLE" assert 'ALIGNAK_TEST_VARIABLE' not in a.get_local_environnement() a.env = {'ALIGNAK_TEST_VARIABLE': 'is now existing and defined'} assert 'ALIGNAK_TEST_VARIABLE' in a.get_local_environnement() assert a.get_local_environnement( )['ALIGNAK_TEST_VARIABLE'] == 'is now existing and defined' # Execute action a.execute() self.wait_finished(a) assert a.output == 'is now existing and defined' def test_grep_for_environment_variables(self): """ Test grep for environment variables :return: None """ a = Action() a.command = "/usr/bin/env | grep ALIGNAK_TEST_VARIABLE" assert 'ALIGNAK_TEST_VARIABLE' not in a.get_local_environnement() a.env = {'ALIGNAK_TEST_VARIABLE': 'is now existing and defined'} assert 'ALIGNAK_TEST_VARIABLE' in a.get_local_environnement() assert a.get_local_environnement( )['ALIGNAK_TEST_VARIABLE'] == 'is now existing and defined' # Execute action a.execute() self.wait_finished(a) assert a.output == 'ALIGNAK_TEST_VARIABLE=is now existing and defined' def test_environment_variables(self): """ Test environment variables :return: None """ class ActionWithoutPerfData(Action): def get_outputs(self, out, max_len): """ For testing only... 
Do not cut the outputs into perf_data to avoid problems with enviroment containing a dash like in `LESSOPEN=|/usr/bin/lesspipe.sh %s` """ self.output = out a = ActionWithoutPerfData() a.command = "/usr/bin/env" assert 'ALIGNAK_TEST_VARIABLE' not in a.get_local_environnement() a.env = {'ALIGNAK_TEST_VARIABLE': 'is now existing and defined'} assert False == a.got_shell_characters() assert 'ALIGNAK_TEST_VARIABLE' in a.get_local_environnement() assert a.get_local_environnement( )['ALIGNAK_TEST_VARIABLE'] == 'is now existing and defined' # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end and set the max output we want for the command self.wait_finished(a, size=20*1024) searched_env_found = False for line in a.output.splitlines(): if line == 'ALIGNAK_TEST_VARIABLE=is now existing and defined': searched_env_found = True assert searched_env_found def test_noshell_bang_command(self): """ Test no shebang in the command script Some commands are shell without bangs! (like in Centreon...) We can detect it in the launch, and it should be managed :return: None """ a = Action() a.command = os.path.join( self._test_dir, "libexec/dummy_command_nobang.sh") assert False == a.got_shell_characters() a.execute() # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end self.wait_finished(a) assert 0 == a.exit_status assert 'done' == a.status assert "Hi, I'm for testing only. Please do not use me directly, really" == a.output assert "" == a.long_output assert "Hip=99% Bob=34mm" == a.perf_data def test_got_shell_characters(self): """ Test shell characters in the command (&>...) 
:return: None """ a = Action() a.command = os.path.join( self._test_dir, "libexec/dummy_command_nobang.sh && echo finished ok") assert True == a.got_shell_characters() # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end self.wait_finished(a) assert 0 == a.exit_status assert 'done' == a.status assert "Hi, I'm for testing only. Please do not use me directly, really" == a.output assert "finished ok" == a.long_output assert "Hip=99% Bob=34mm" == a.perf_data def test_got_pipe_shell_characters(self): """ Test pipe shell character in the command :return: None """ a = Action() a.command = os.path.join( self._test_dir, "libexec/dummy_command_nobang.sh | grep 'I will not match this search!'") assert True == a.got_shell_characters() # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end self.wait_finished(a) assert 1 == a.exit_status assert 'done' == a.status assert "" == a.output assert "" == a.long_output assert "" == a.perf_data def test_got_unclosed_quote(self): """ Test unclosed quote in the command :return: None """ # https://github.com/naparuba/shinken/issues/155 a = Action() a.command = os.path.join( self._test_dir, "libexec/dummy_command_nobang.sh -a 'wwwwzzzzeeee") # Run the action script with pytest.raises(ActionError): a.execute() # Do not wait for end because it did not really started ... assert 'done' == a.status assert 'Not a valid shell command: No closing quotation' == a.output assert 3 == a.exit_status def test_huge_output(self): """ Test huge output We got problems on LARGE output, more than 64K in fact. We try to solve it with the fcntl and non blocking read instead of "communicate" mode. So here we try to get a 100K output. 
Should NOT be in a timeout :return: None """ # Set max output length max_output_length = 131072 a = Action() a.timeout = 15 a.command = r"""python -u -c 'print("."*%d)'""" % max_output_length ### # 1 - output is less than the max output ### # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end and set the max output we want for the command self.wait_finished(a, size=max_output_length + 1) assert 0 == a.exit_status assert 'done' == a.status assert "."*max_output_length == a.output assert "" == a.long_output assert "" == a.perf_data ### # 2 - output is equal to the max output ### # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end and set the max output we want for the command self.wait_finished(a, size=max_output_length) assert 0 == a.exit_status assert 'done' == a.status assert "."*max_output_length == a.output assert "" == a.long_output assert "" == a.perf_data ### # 3 - output is more than the max output ### # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end and set the max output we want for the command self.wait_finished(a, size=max_output_length - 10) assert 0 == a.exit_status assert 'done' == a.status assert "."*(max_output_length - 10) == a.output assert "" == a.long_output assert "" == a.perf_data @pytest.mark.skip(reason="This test runs ok when it is the only test run in this module!") def test_start_do_not_fail_with_utf8(self): """ Test command process do not fail with utf8 :return: None """ # 1 - French a = Action() # A French text - note the double quotes escaping! 
a.command = u"/bin/echo \"Les naïfs ægithales hâtifs pondant à Noël où il gèle sont sûrs " \ u"d'être déçus en voyant leurs drôles d'œufs abîmés.\"" # Run the action script a.execute() # Wait action execution end and set the max output we want for the command self.wait_finished(a) assert 0 == a.exit_status assert 'done' == a.status assert u"Les naïfs ægithales hâtifs pondant à Noël où il gèle sont sûrs " \ u"d'être déçus en voyant leurs drôles d'œufs abîmés." == a.output assert "" == a.long_output assert "" == a.perf_data # 2 - Russian sentence a = Action() # A russian text a.command = u"/bin/echo На берегу пустынных волн" # Run the action script a.execute() # Wait action execution end and set the max output we want for the command self.wait_finished(a) assert 0 == a.exit_status assert 'done' == a.status assert u"На берегу пустынных волн" == a.output assert "" == a.long_output assert "" == a.perf_data # 3 - Russian text a = Action() # A russian text (long output) a.command = u"/bin/echo 'На берегу пустынных волн\n" \ u"Стоял он, дум великих полн,\n" \ u"И вдаль глядел. Пред ним широко\n" \ u"Река неслася; бедный чёлн\n" \ u"По ней стремился одиноко.\n" \ u"По мшистым, топким берегам\n" \ u"Чернели избы здесь и там,\n" \ u"Приют убогого чухонца;\n" \ u"И лес, неведомый лучам\n" \ u"В тумане спрятанного солнца,\n" \ u"Кругом шумел.'" # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end and set the max output we want for the command self.wait_finished(a) assert 0 == a.exit_status assert 'done' == a.status assert u"На берегу пустынных волн" == a.output assert u"Стоял он, дум великих полн,\n" \ u"И вдаль глядел. Пред ним широко\n" \ u"Река неслася; бедный чёлн\n" \ u"По ней стремился одиноко.\n" \ u"По мшистым, топким берегам\n" \ u"Чернели избы здесь и там,\n" \ u"Приют убогого чухонца;\n" \ u"И лес, неведомый лучам\n" \ u"В тумане спрятанного солнца,\n" \ u"Кругом шумел." 
== a.long_output assert "" == a.perf_data def test_non_zero_exit_status_empty_output_but_non_empty_stderr(self): """ Test catch stdout and stderr :return: None """ a = Action() a.command = "echo Output to stderr >&2 ; exit 1" # Run the action script a.execute() assert 'launched' == a.status # Wait action execution end and set the max output we want for the command self.wait_finished(a) assert 1 == a.exit_status assert 'done' == a.status assert "Output to stderr" == a.output assert "" == a.long_output assert "" == a.perf_data
class TestAction(AlignakTest): def setUp(self): pass def wait_finished(self, a, size=8192, timeout=20): pass def test_action_creation(self): ''' Test action object creation / initialization :return: None ''' pass def test_action_creation(self): ''' Test simple action execution :return: None ''' pass def test_action_timeout(self): ''' Test simple action execution - fail on timeout :return: None ''' pass def test_echo_environment_variables(self): ''' Test echo environment variables :return: None ''' pass def test_grep_for_environment_variables(self): ''' Test grep for environment variables :return: None ''' pass def test_environment_variables(self): ''' Test environment variables :return: None ''' pass class ActionWithoutPerfData(Action): def get_outputs(self, out, max_len): ''' For testing only... Do not cut the outputs into perf_data to avoid problems with enviroment containing a dash like in `LESSOPEN=|/usr/bin/lesspipe.sh %s` ''' pass def test_noshell_bang_command(self): ''' Test no shebang in the command script Some commands are shell without bangs! (like in Centreon...) We can detect it in the launch, and it should be managed :return: None ''' pass def test_got_shell_characters(self): ''' Test shell characters in the command (&>...) :return: None ''' pass def test_got_pipe_shell_characters(self): ''' Test pipe shell character in the command :return: None ''' pass def test_got_unclosed_quote(self): ''' Test unclosed quote in the command :return: None ''' pass def test_huge_output(self): ''' Test huge output We got problems on LARGE output, more than 64K in fact. We try to solve it with the fcntl and non blocking read instead of "communicate" mode. So here we try to get a 100K output. 
Should NOT be in a timeout :return: None ''' pass @pytest.mark.skip(reason="This test runs ok when it is the only test run in this module!") def test_start_do_not_fail_with_utf8(self): ''' Test command process do not fail with utf8 :return: None ''' pass def test_non_zero_exit_status_empty_output_but_non_empty_stderr(self): ''' Test catch stdout and stderr :return: None ''' pass
19
14
33
4
20
9
2
0.44
1
7
5
0
15
0
15
70
541
81
319
42
300
141
229
41
211
7
2
2
28
3,912
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_actions.py
tests.test_actions.TestAction.test_environment_variables.ActionWithoutPerfData
class ActionWithoutPerfData(Action): def get_outputs(self, out, max_len): """ For testing only... Do not cut the outputs into perf_data to avoid problems with enviroment containing a dash like in `LESSOPEN=|/usr/bin/lesspipe.sh %s` """ self.output = out
class ActionWithoutPerfData(Action): def get_outputs(self, out, max_len): ''' For testing only... Do not cut the outputs into perf_data to avoid problems with enviroment containing a dash like in `LESSOPEN=|/usr/bin/lesspipe.sh %s` ''' pass
2
1
6
0
2
4
1
1.33
1
0
0
0
1
1
1
16
7
0
3
3
1
4
3
3
1
1
4
0
1
3,913
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_daemon_start.py
tests.test_daemon_start.TemplateDaemonStart
class TemplateDaemonStart(): @classmethod def setUpClass(cls): # we'll chdir() to it in tearDown.. cls._launch_dir = os.getcwd() @classmethod def tearDown(cls): os.chdir(cls._launch_dir) def get_login_and_group(self, daemon): try: daemon.user = get_cur_user() daemon.group = get_cur_group() except OSError: # on some rare case, we can have a problem here # so bypass it and keep default value return def create_daemon(self, is_daemon=False, do_replace=False, debug_file=None): cls = self.daemon_cls # This to allow using a reference configuration if needed, # and to make some tests easier to set-up print("Preparing default configuration...") # if os.path.exists('/tmp/etc/alignak'): # shutil.rmtree('/tmp/etc/alignak') # shutil.copytree('../etc', '/tmp/etc/alignak') # files = ['/tmp/etc/alignak/alignak.ini'] # replacements = { # '_dist=/usr/local/': '_dist=/tmp', # ';is_daemon=0': 'is_daemon=0' # } # self._files_update(files, replacements) # print("Prepared") print("Env: %s, daemon: %s, daemonize: %s, replace: %s, debug: %s" % (alignak_environment, self.daemon_name, is_daemon, do_replace, debug_file)) args = { 'env_file': alignak_environment, 'alignak_name': 'my-alignak', 'daemon_name': self.daemon_name, 'is_daemon': is_daemon, 'do_replace': do_replace, 'config_file': None, 'debug': debug_file is not None, 'debug_file': debug_file, } return cls(**args) def get_daemon(self, is_daemon=False, do_replace=False, free_port=True, debug_file=None): """ :param free_port: get a free port (True) or use the configuration defined port (False) :return: """ print("Get daemon...") daemon = self.create_daemon(is_daemon, do_replace, debug_file) print("Got: %s" % daemon) return daemon def start_daemon(self, daemon): """ Start the daemon :param daemon: :return: """ print("Starting daemon: %s" % daemon.name) # daemon.load_modules_manager() # daemon.do_load_modules([]) daemon.do_daemon_init_and_start(set_process_title=False) print("Started: %s" % daemon.name) def stop_daemon(self, daemon): """ 
Stop the daemon :param daemon: :return: """ # Do not call request_stop because it sys.exit ... and this stops the test! # daemon.request_stop() # Instead call the same code hereunder: print("Stopping daemon: %s" % daemon.name) daemon.unlink() daemon.do_stop() print("Stopped") @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_config_and_start_and_stop_debug(self): """ Test configuration loaded, daemon started and stopped - daemon in debug mode :return: """ # Start normally with debug file self.test_config_and_start_and_stop(debug_file='/tmp/debug-daemon.log') @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_config_and_start_and_stop(self, debug_file=None): """ Test configuration loaded, daemon started and stopped :return: """ # Start normally daemon = self.get_daemon(debug_file=debug_file) print("Got daemon: %s" % daemon) if debug_file: assert daemon.debug is True else: assert daemon.debug_file is None assert daemon.pid_filename == os.path.abspath( '/tmp/var/run/alignak/%s.pid' % daemon.name) save_pid_fname = daemon.pid_filename # assert daemon.log_filename == os.path.abspath('./cfg/daemons/log/%s.log' % daemon.name) # Because logs are defined in the logger configuration assert daemon.log_filename == '' # Start the daemon self.start_daemon(daemon) # Check PID file assert os.path.exists(daemon.pid_filename) time.sleep(5) # Stop the daemon and unlink the pid file self.stop_daemon(daemon) assert not os.path.exists(daemon.pid_filename) # Reset initial working dir os.chdir(self._launch_dir) # Start as a daemon and replace if still exists print("Cwd: %s" % self._launch_dir) daemon = self.get_daemon( is_daemon=False, do_replace=False, free_port=False) print("Cwd: %s" % self._launch_dir) # Use the same pid file assert daemon.pid_filename == save_pid_fname # assert daemon.log_filename == os.path.abspath('./cfg/daemons/log/%s.log' % daemon.name) 
# Because logs are defined in the logger configuration assert daemon.log_filename == '' # Update working dir to use temporary daemon.workdir = tempfile.mkdtemp() daemon.pid_filename = os.path.join(daemon.workdir, "daemon.pid") # Start the daemon self.start_daemon(daemon) assert os.path.exists(daemon.pid_filename) time.sleep(5) #  Stop the daemon self.stop_daemon(daemon) assert not os.path.exists(daemon.pid_filename) @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_config_and_replace_and_stop(self): """ Test configuration loaded, daemon started, replaced and stopped :return: """ # # Start normally daemon = self.get_daemon( is_daemon=False, do_replace=False, free_port=False) assert daemon.debug_file == None assert daemon.pid_filename == os.path.abspath( '/tmp/var/run/alignak/%s.pid' % daemon.name) # assert daemon.log_filename == os.path.abspath('./cfg/daemons/log/%s.log' % daemon.name) # Because logs are defined in the logger configuration assert daemon.log_filename == '' # Start the daemon self.start_daemon(daemon) # Get PID assert os.path.exists(daemon.pid_filename) fpid = open(daemon.pid_filename, 'r+') pid_var = fpid.readline().strip(' \r\n') print("Daemon's pid: %s" % pid_var) # Get daemon statistics stats = daemon.get_daemon_stats() print("Daemon: %s" % daemon.__dict__) assert 'alignak' in stats assert 'version' in stats assert 'name' in stats assert 'type' in stats assert stats['name'] == daemon.name assert stats['type'] == daemon.type assert 'spare' in stats assert 'program_start' in stats assert 'modules' in stats assert 'metrics' in stats time.sleep(2) # Stop the daemon, but do not unlink the pid file # self.stop_daemon(d) daemon.do_stop() assert os.path.exists(daemon.pid_filename) # Update log file information daemon.log_filename = os.path.abspath( os.path.join(daemon.logdir, daemon.name + ".log")) print("Daemon's logdir: %s" % daemon.logdir) print("Daemon's log: %s" % daemon.log_filename) 
# Do not reload the configuration file (avoid replacing modified properties for the test...) daemon.setup_alignak_logger() # Reset initial working dir os.chdir(self._launch_dir) # Start as a daemon and replace if still exists daemon = self.get_daemon( is_daemon=False, do_replace=True, free_port=False) assert daemon.pid_filename == os.path.abspath( '/tmp/var/run/alignak/%s.pid' % daemon.name) # assert daemon.log_filename == os.path.abspath('./cfg/daemons/log/%s.log' % daemon.name) # Because logs are defined in the logger configuration assert daemon.log_filename == '' # Update working dir to use temporary daemon.workdir = tempfile.mkdtemp() daemon.pid_filename = os.path.join(daemon.workdir, "daemon.pid") # Start the daemon self.start_daemon(daemon) assert os.path.exists(daemon.pid_filename) fpid = open(daemon.pid_filename, 'r+') pid_var = fpid.readline().strip(' \r\n') print("Daemon's (new) pid: %s" % pid_var) time.sleep(2) #  Stop the daemon self.stop_daemon(daemon) assert not os.path.exists(daemon.pid_filename) def test_bad_piddir(self): """ Test bad PID directory :return: """ daemon = self.get_daemon() daemon.workdir = tempfile.mkdtemp() daemon.pid_filename = os.path.abspath( os.path.join('/DONOTEXISTS', "daemon.pid")) with pytest.raises(SystemExit): self.start_daemon(daemon) # Stop the daemon self.stop_daemon(daemon) def test_bad_workdir(self): """ Test bad working directory :return: """ daemon = self.get_daemon() daemon.workdir = '/DONOTEXISTS' with pytest.raises(SystemExit): self.start_daemon(daemon) # Stop the daemon self.stop_daemon(daemon) def test_logger(self): """ Test logger setup :return: """ self.clear_logs() daemon = self.get_daemon() assert daemon.pid_filename == os.path.abspath( '%s/%s.pid' % (daemon.workdir, daemon.name)) assert daemon.log_filename == os.path.abspath( '%s/%s.log' % (daemon.logdir, daemon.name)) # assert daemon.log_filename == '' # Because logs are defined in the logger configuration # Do not reload the configuration file (avoid 
replacing modified properties for the test...) daemon.setup_alignak_logger() daemon.debug = True self.show_logs() # The daemon log file is set by the logger configuration ... if it did not exist # an exception should have been raised! # Stop the daemon self.stop_daemon(daemon) def test_daemon_header(self): """ Test daemon header :return: """ daemon = self.get_daemon() expected_result = [ u"-----", u" █████╗ ██╗ ██╗ ██████╗ ███╗ ██╗ █████╗ ██╗ ██╗", u" ██╔══██╗██║ ██║██╔════╝ ████╗ ██║██╔══██╗██║ ██╔╝", u" ███████║██║ ██║██║ ███╗██╔██╗ ██║███████║█████╔╝ ", u" ██╔══██║██║ ██║██║ ██║██║╚██╗██║██╔══██║██╔═██╗ ", u" ██║ ██║███████╗██║╚██████╔╝██║ ╚████║██║ ██║██║ ██╗", u" ╚═╝ ╚═╝╚══════╝╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝", u"-----", u"Alignak %s - %s daemon" % (VERSION, daemon.name), u"Copyright (c) 2015-2019: Alignak Team", u"License: AGPL", u"-----", u"Python: %s.%s" % (sys.version_info.major, sys.version_info.minor), u"-----", u"My pid: %s" % daemon.pid ] assert daemon.get_header()[:15] == expected_result def test_daemon_environment(self): """ Test daemon environment variables :return: """ os.environ['ALIGNAK_USER'] = 'fake_improbable_user' os.environ['ALIGNAK_GROUP'] = get_cur_group() # Exit because the user / group does not exist! with pytest.raises(SystemExit): daemon = self.get_daemon() os.environ['ALIGNAK_USER'] = get_cur_user() os.environ['ALIGNAK_GROUP'] = 'fake_improbable_group' # Exit because the user / group does not exist! with pytest.raises(SystemExit): daemon = self.get_daemon() del os.environ['ALIGNAK_USER'] del os.environ['ALIGNAK_GROUP'] @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_port_not_free(self): """ Test HTTP port not free detection :return: """ print("Testing port not free ... 
mypid=%d" % (os.getpid())) d1 = self.get_daemon() d2 = self.get_daemon() d1.workdir = tempfile.mkdtemp() d1.pid_filename = os.path.abspath( os.path.join(d1.workdir, "daemon.pid")) # d1.host = "127.0.0.1" # Force all interfaces print("Listening on: %s:%s" % (d1.host, d1.port)) self.start_daemon(d1) time.sleep(5) print("PID file: %s" % d1.pid_filename) assert os.path.exists(d1.pid_filename) # # Trying to open the daemon used port... # sock = socket.socket() # try: # sock.bind((d1.host, d1.port)) # print("Socket: %s" % sock.getsockname()[1]) # except socket.error as exp: # print("Error: %s" % exp) # else: # sock.close() # assert False, "The socket should not be available!" # so that second daemon will not see first started one: time.sleep(1) os.unlink(d1.pid_filename) d2.workdir = d1.workdir d2.pid_filename = d1.pid_filename d2.host = "127.0.0.1" # Force all interfaces # Use the same port as the first daemon d2.port = d1.port self.start_daemon(d2) time.sleep(5) #  Stop the daemon d2.do_stop() time.sleep(1) #  Stop the daemon d1.do_stop() time.sleep(1) @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_daemon_run(self): """ Test daemon start run :return: """ print("Get daemon... 
!!!") d1 = self.get_daemon() print("Daemon: %s" % d1) # d1.workdir = tempfile.mkdtemp() # d1.pid_filename = os.path.abspath(os.path.join(d1.workdir, "daemon.pid")) print("Listening on: %s:%s" % (d1.host, d1.port)) self.start_daemon(d1) # time.sleep(1) # try: # print("pid file: %s (%s)" % (d1.pid_filename, os.getpid())) # except Exception as exp: # print("Exception: %s" % exp) # assert os.path.exists(d1.pid_filename) # print("Cherrypy: %s" % (d1.http_daemon.cherrypy_thread)) # # print("Cherrypy: %s (%s)" % (d1.http_daemon.cherrypy_thread, d1.http_daemon.cherrypy_thread.__dict__)) # # time.sleep(5) # # # Get daemon statistics # stats = d1.get_daemon_stats() # print("Daemon stats: %s" % stats) # These properties are only provided by the Web interface # assert 'alignak' in stats # assert 'version' in stats # assert 'name' in stats # assert 'type' in stats # assert stats['name'] == d1.name # assert stats['type'] == d1.type # assert 'spare' in stats # assert 'program_start' in stats # assert 'modules' in stats # assert 'metrics' in stats # # time.sleep(1) # # # Stop the daemon # # d1.do_stop() time.sleep(1) #  Stop the daemon self.stop_daemon(d1)
class TemplateDaemonStart(): @classmethod def setUpClass(cls): pass @classmethod def tearDown(cls): pass def get_login_and_group(self, daemon): pass def create_daemon(self, is_daemon=False, do_replace=False, debug_file=None): pass def get_daemon(self, is_daemon=False, do_replace=False, free_port=True, debug_file=None): ''' :param free_port: get a free port (True) or use the configuration defined port (False) :return: ''' pass def start_daemon(self, daemon): ''' Start the daemon :param daemon: :return: ''' pass def stop_daemon(self, daemon): ''' Stop the daemon :param daemon: :return: ''' pass @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_config_and_start_and_stop_debug(self): ''' Test configuration loaded, daemon started and stopped - daemon in debug mode :return: ''' pass @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_config_and_start_and_stop_debug(self): ''' Test configuration loaded, daemon started and stopped :return: ''' pass @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_config_and_replace_and_stop(self): ''' Test configuration loaded, daemon started, replaced and stopped :return: ''' pass def test_bad_piddir(self): ''' Test bad PID directory :return: ''' pass def test_bad_workdir(self): ''' Test bad working directory :return: ''' pass def test_logger(self): ''' Test logger setup :return: ''' pass def test_daemon_header(self): ''' Test daemon header :return: ''' pass def test_daemon_environment(self): ''' Test daemon environment variables :return: ''' pass @pytest.mark.skip("Not easily testable with CherryPy ... " "by the way this will mainly test Cherrypy ;)") def test_port_not_free(self): ''' Test HTTP port not free detection :return: ''' pass @pytest.mark.skip("Not easily testable with CherryPy ... 
" "by the way this will mainly test Cherrypy ;)") def test_daemon_run(self): ''' Test daemon start run :return: ''' pass
25
13
23
3
11
9
1
0.75
0
2
0
6
15
0
17
17
427
73
206
48
176
154
171
36
153
2
0
1
19
3,914
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_dependencies.py
tests.test_dependencies.TestDependencies
class TestDependencies(AlignakTest): """ This class test dependencies between services, hosts This is how the tests are named: * test_u_<function_name>: unit test for a function * test_c_*: test configuration * test_a_*: test with only active checks * test_p_*: test with only passive checks * test_ap_*: test with both active and passive checks * test_*_s_*: test simple dependencies (2 dependencies) * test_*_m_*: test complex dependencies (> 2 dependencies) * test_*_h_*: test with hostgroups """ def setUp(self): super(TestDependencies, self).setUp() def test_u_is_enable_action_dependent(self): """ Test the function is_enable_action_dependent in SchedulingItem :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 hosts = self._scheduler.hosts services = self._scheduler.services # a. 1 dep host host = self._scheduler.hosts.find_by_name("test_host_0") router = self._scheduler.hosts.find_by_name("test_router_0") assert 1 == len(host.act_depend_of) assert router.uuid == host.act_depend_of[0][0] host.act_depend_of[0][1] = ['d', 'x'] for state in ['o', 'UP']: router.state = state assert host.is_enable_action_dependent(hosts, services) for state in ['d', 'DOWN', 'x', 'UNREACHABLE']: router.state = state assert not host.is_enable_action_dependent(hosts, services) host.act_depend_of[0][1] = ['n'] for state in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router.state = state assert host.is_enable_action_dependent(hosts, services) host.act_depend_of[0][1] = ['d', 'n'] for state in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router.state = state assert host.is_enable_action_dependent(hosts, services) # b. 
3 dep host = self._scheduler.hosts.find_by_name("test_host_0") router = self._scheduler.hosts.find_by_name("test_router_0") router_00 = self._scheduler.hosts.find_by_name("test_router_00") host_00 = self._scheduler.hosts.find_by_name("test_host_00") assert 1 == len(host.act_depend_of) assert router.uuid == host.act_depend_of[0][0] # add dependencies ado = copy(host.act_depend_of[0]) ado[0] = router_00.uuid host.act_depend_of.append(ado) ado = copy(host.act_depend_of[0]) ado[0] = host_00.uuid host.act_depend_of.append(ado) assert 3 == len(host.act_depend_of) assert router.uuid == host.act_depend_of[0][0] assert router_00.uuid == host.act_depend_of[1][0] assert host_00.uuid == host.act_depend_of[2][0] host.act_depend_of[0][1] = ['d', 'x'] host.act_depend_of[1][1] = ['d', 'x'] host.act_depend_of[2][1] = ['d', 'x'] for rstate in ['o', 'UP']: router.state = rstate for r00state in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router_00.state = r00state for hstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: host_00.state = hstate assert host.is_enable_action_dependent(hosts, services) for rstate in ['d', 'DOWN', 'x', 'UNREACHABLE']: router.state = rstate for r00state in ['o', 'UP']: router_00.state = r00state for hstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: host_00.state = hstate assert host.is_enable_action_dependent(hosts, services) for r00state in ['d', 'DOWN', 'x', 'UNREACHABLE']: router_00.state = r00state for hstate in ['o', 'UP']: host_00.state = hstate assert host.is_enable_action_dependent(hosts, services) for hstate in ['d', 'DOWN', 'x', 'UNREACHABLE']: host_00.state = hstate assert not host.is_enable_action_dependent(hosts, services) host.act_depend_of[1][1] = ['n'] for rstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router.state = rstate for r00state in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router_00.state = r00state for hstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: host_00.state = hstate assert 
host.is_enable_action_dependent(hosts, services) host.act_depend_of[1][1] = ['d', 'n'] for rstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router.state = rstate for r00state in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router_00.state = r00state for hstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: host_00.state = hstate assert host.is_enable_action_dependent(hosts, services) def test_u_check_and_set_unreachability(self): """ Test the function check_and_set_unreachability in SchedulingItem :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 hosts = self._scheduler.hosts services = self._scheduler.services host = self._scheduler.hosts.find_by_name("test_host_0") router = self._scheduler.hosts.find_by_name("test_router_0") router_00 = self._scheduler.hosts.find_by_name("test_router_00") host_00 = self._scheduler.hosts.find_by_name("test_host_00") assert 1 == len(host.act_depend_of) assert router.uuid == host.act_depend_of[0][0] # add dependencies ado = copy(host.act_depend_of[0]) ado[0] = router_00.uuid host.act_depend_of.append(ado) ado = copy(host.act_depend_of[0]) ado[0] = host_00.uuid host.act_depend_of.append(ado) assert 3 == len(host.act_depend_of) assert router.uuid == host.act_depend_of[0][0] assert router_00.uuid == host.act_depend_of[1][0] assert host_00.uuid == host.act_depend_of[2][0] for rstate in ['o', 'UP']: router.state = rstate for r00state in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: router_00.state = r00state for hstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: host_00.state = hstate host.state = 'UP' host.check_and_set_unreachability(hosts, services) assert 'UP' == host.state for rstate in ['d', 'DOWN', 'x', 'UNREACHABLE']: router.state = rstate for r00state in ['o', 'UP']: router_00.state = r00state for hstate in ['o', 'UP', 'd', 'DOWN', 'x', 'UNREACHABLE']: host_00.state 
= hstate host.state = 'UP' host.check_and_set_unreachability(hosts, services) assert 'UP' == host.state for r00state in ['d', 'DOWN', 'x', 'UNREACHABLE']: router_00.state = r00state for hstate in ['o', 'UP']: host_00.state = hstate host.state = 'UP' host.check_and_set_unreachability(hosts, services) assert 'UP' == host.state for hstate in ['d', 'DOWN', 'x', 'UNREACHABLE']: host_00.state = hstate host.state = 'UP' host.check_and_set_unreachability(hosts, services) assert 'UNREACHABLE' == host.state def test_c_dependencies_default(self): """ Test dependencies correctly loaded from config files :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 for host in self._scheduler.hosts: print("Host: %s" % host) print("- parents: %s" % host.parent_dependencies) # print("- I depend of them: %s" % host.act_depend_of) for uuid in host.parent_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) print("- children: %s" % host.child_dependencies) # print("- They depend of me: %s" % host.act_depend_of_me) for uuid in host.child_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) for svc in self._scheduler.services: print("Service: %s" % svc) print("- parents: %s" % svc.parent_dependencies) # print("- I depend of them: %s" % svc.act_depend_of) for uuid in svc.parent_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) print("- children: %s" % svc.child_dependencies) # print("- They depend of me: %s" % svc.act_depend_of_me) for uuid in svc.child_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) # test_router_0 host = self._scheduler.hosts.find_by_name("test_router_0") # Parents assert len(host.act_depend_of) == 0 # Children assert len(host.act_depend_of_me) == 1 for uuid in host.child_dependencies: assert 
self._scheduler.hosts[uuid].host_name == 'test_host_0' for (uuid, _, _, _) in host.act_depend_of_me: assert self._scheduler.hosts[uuid].host_name == 'test_host_0' # test_host_0 -> test_router_0 host = self._scheduler.hosts.find_by_name("test_host_0") # Parents assert 1 == len(host.act_depend_of) for uuid in host.parent_dependencies: assert self._scheduler.hosts[uuid].host_name == 'test_router_0' for (uuid, _, _, _) in host.act_depend_of: assert self._scheduler.hosts[uuid].host_name == 'test_router_0' # Children assert len(host.act_depend_of_me) == 1 for uuid in host.child_dependencies: assert self._scheduler.services[uuid].get_full_name( ) == 'test_host_0/test_ok_0' for (uuid, _, _, _) in host.act_depend_of_me: assert self._scheduler.services[uuid].get_full_name( ) == 'test_host_0/test_ok_0' # test test_host_0.test_ok_0 -> test_host_0 svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") for (dep_id, _, _, _) in svc.act_depend_of: if dep_id in self._scheduler.hosts: # Depends from its host assert self._scheduler.hosts[dep_id].host_name == 'test_host_0' else: # Depends from another service of its host assert self._scheduler.services[dep_id].host_name == 'test_host_0' assert self._scheduler.services[dep_id].service_description == 'test_ok_0' def test_c_dependencies(self): """ Test dependencies correctly loaded from config files :return: None """ self.set_unit_tests_logger_level() self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) self.show_logs() assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 for host in self._scheduler.hosts: print("Host: %s" % host) print("- parents: %s" % host.parent_dependencies) # print("- I depend of them: %s" % host.act_depend_of) for uuid in host.parent_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) print("- children: %s" % host.child_dependencies) # print("- They depend of me: %s" % 
host.act_depend_of_me) for uuid in host.child_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) for svc in self._scheduler.services: print("Service: %s" % svc) print("- parents: %s" % svc.parent_dependencies) # print("- I depend of them: %s" % svc.act_depend_of) for uuid in svc.parent_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) print("- children: %s" % svc.child_dependencies) # print("- They depend of me: %s" % svc.act_depend_of_me) for uuid in svc.child_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) # test_host_00 -> test_router_00 test_host_00 = self._scheduler.hosts.find_by_name("test_host_00") assert 1 == len(test_host_00.act_depend_of) for (host, _, _, _) in test_host_00.act_depend_of: assert self._scheduler.hosts[host].host_name == 'test_router_00' # test test_host_00.test_ok_1 -> test_host_00 # test test_host_00.test_ok_1 -> test_host_00.test_ok_0 svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_00", "test_ok_1") for (dep_id, _, _, _) in svc.act_depend_of: if dep_id in self._scheduler.hosts: # Depends from its host assert self._scheduler.hosts[dep_id].host_name == 'test_host_00' else: # Depends from another service of its host assert self._scheduler.services[dep_id].host_name == 'test_host_00' assert self._scheduler.services[dep_id].service_description == 'test_ok_0' # test test_host_C -> test_host_A # test test_host_C -> test_host_B test_host_c = self._scheduler.hosts.find_by_name("test_host_C") assert 2 == len(test_host_c.act_depend_of) hosts = [] for (host, _, _, _) in test_host_c.act_depend_of: hosts.append(self._scheduler.hosts[host].host_name) self.assertItemsEqual(hosts, ['test_host_A', 'test_host_B']) # test test_host_E -> test_host_D test_host_e = self._scheduler.hosts.find_by_name("test_host_E") assert 1 == len(test_host_e.act_depend_of) for (host, _, _, _) in test_host_e.act_depend_of: assert 
self._scheduler.hosts[host].host_name == 'test_host_D' # test test_host_11.test_parent_svc -> test_host_11.test_son_svc svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_11", "test_parent_svc") for (dep_id, _, _, _) in svc.act_depend_of: if dep_id in self._scheduler.hosts: assert self._scheduler.hosts[dep_id].host_name == 'test_host_11' else: assert self._scheduler.services[dep_id].service_description == 'test_son_svc' # test test_host_11.test_ok_1 -> test_host_11.test_ok_0 svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_11", "test_ok_1") for (dep_id, _, _, _) in svc.act_depend_of: if dep_id in self._scheduler.hosts: assert self._scheduler.hosts[dep_id].host_name == 'test_host_11' else: assert self._scheduler.services[dep_id].service_description == 'test_ok_0' def test_c_host_passive_service_active(self): """ Test host passive and service active :return: None """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 host = self._scheduler.hosts.find_by_name("host_P") svc = self._scheduler.services.find_srv_by_name_and_hostname( "host_P", "service_A") assert 0 == len(svc.act_depend_of) def test_c_host_passive_service_passive(self): """ Test host passive and service passive :return: None """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 host = self._scheduler.hosts.find_by_name("host_P") svc = self._scheduler.services.find_srv_by_name_and_hostname( "host_P", "service_P") assert 0 == len(svc.act_depend_of) def test_c_host_active_service_passive(self): """ Test host active and service passive :return: None """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 
assert len(self.configuration_warnings) == 0 host = self._scheduler.hosts.find_by_name("host_A") svc = self._scheduler.services.find_srv_by_name_and_hostname( "host_A", "service_P") assert 1 == len(svc.act_depend_of) assert host.uuid == svc.act_depend_of[0][0] def test_c_host_active_on_host_passive(self): """ Test host active on host active :return: None """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 host0 = self._scheduler.hosts.find_by_name("host_P_0") host1 = self._scheduler.hosts.find_by_name("host_A_P") assert 0 == len(host1.act_depend_of) def test_c_host_passive_on_host_active(self): """ Test host passive on host active :return: None """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 host0 = self._scheduler.hosts.find_by_name("host_A_0") host1 = self._scheduler.hosts.find_by_name("host_P_A") assert 1 == len(host1.act_depend_of) assert host0.uuid == host1.act_depend_of[0][0] def test_c_host_passive_on_host_passive(self): """ Test host passive on host passive :return: None """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 host0 = self._scheduler.hosts.find_by_name("host_P_0") host1 = self._scheduler.hosts.find_by_name("host_P_P") assert 0 == len(host1.act_depend_of) def test_c_options_x(self): """ Test conf for 'x' (UNREACHABLE) in act_depend_of :return: """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) self.show_logs() assert self.conf_is_correct assert len(self.configuration_errors) == 0 assert len(self.configuration_warnings) == 0 host0 = self._scheduler.hosts.find_by_name("host_o_A") host1 = 
self._scheduler.hosts.find_by_name("host_o_B") assert 1 == len(host1.act_depend_of) assert host0.uuid == host1.act_depend_of[0][0] print(("Dep: %s" % host1.act_depend_of[0])) assert ['d', 'x'] == host1.act_depend_of[0][1] def test_conf_not_correct_1(self): """ Test that the arbiter raises an error when there is orphan dependency in config files in hostdependency, dependent_host_name is unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad1.cfg') self.assert_any_cfg_log_match(re.escape( "got a bad dependent_host_name definition" )) self.assert_any_cfg_log_match(re.escape( "Configuration is incorrect" )) self.assert_any_cfg_log_match(re.escape( "hostdependencies configuration is incorrect!" )) assert len(self.configuration_errors) == 3 assert len(self.configuration_warnings) == 0 def test_conf_not_correct_2(self): """ Test that the arbiter raises an error when we have an orphan dependency in config files in hostdependency, host_name unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad2.cfg') self.assert_any_cfg_log_match(re.escape( "got a bad host_name definition" )) self.assert_any_cfg_log_match(re.escape( "Configuration is incorrect" )) self.assert_any_cfg_log_match(re.escape( "hostdependencies configuration is incorrect!" )) assert len(self.configuration_errors) == 3 assert len(self.configuration_warnings) == 0 def test_conf_not_correct_3(self): """ Test that the arbiter raises an error when we have an orphan dependency in config files in host definition, the parent is unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad3.cfg') self.show_logs() self.assert_any_cfg_log_match(re.escape( "the parent 'test_router_notexist' for the host 'test_host_11' is unknown" )) self.assert_any_cfg_log_match(re.escape( "hosts configuration is incorrect!" 
)) assert len(self.configuration_errors) == 2 assert len(self.configuration_warnings) == 8 def test_conf_not_correct_4(self): """ Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, dependent_service_description is unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad4.cfg') self.show_logs() self.assert_any_cfg_log_match(re.escape( "Service test_ok_1_notfound not found for host test_host_00" )) self.assert_any_cfg_log_match(re.escape( "servicedependencies configuration is incorrect!" )) assert len(self.configuration_errors) == 2 assert len(self.configuration_warnings) == 0 def test_conf_not_correct_5(self): """ Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, dependent_host_name is unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad5.cfg') self.show_logs() self.assert_any_cfg_log_match(re.escape( "Service test_ok_1 not found for host test_host_00_notfound" )) self.assert_any_cfg_log_match(re.escape( "servicedependencies configuration is incorrect!" )) assert len(self.configuration_errors) == 2 assert len(self.configuration_warnings) == 0 def test_conf_not_correct_6(self): """ Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, host_name unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad6.cfg') self.show_logs() self.assert_any_cfg_log_match(re.escape( "Service test_ok_0 not found for host test_host_00_notfound" )) self.assert_any_cfg_log_match(re.escape( "servicedependencies configuration is incorrect!" 
)) assert len(self.configuration_errors) == 2 assert len(self.configuration_warnings) == 0 def test_conf_not_correct_7(self): """ Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, service_description unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad7.cfg') self.show_logs() self.assert_any_cfg_log_match(re.escape( "Service test_ok_0_notknown not found for host test_host_00" )) self.assert_any_cfg_log_match(re.escape( "Service test_ok_0_notknown not found for host test_host_11" )) self.assert_any_cfg_log_match(re.escape( "servicedependencies configuration is incorrect!" )) # Service test_ok_0_notknown not found for 2 hosts. assert len(self.configuration_errors) == 3 assert len(self.configuration_warnings) == 0 def test_conf_not_correct_8(self): """ Test that the arbiter raises an error when there is orphan dependency in config files in hostdependency, dependent_hostgroup_name is unknown :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/dependencies/cfg_dependencies_bad8.cfg') self.assert_any_cfg_log_match(re.escape( "got a bad dependent_host_name definition 'test_host_X'" )) self.assert_any_cfg_log_match(re.escape( "Configuration is incorrect" )) self.assert_any_cfg_log_match(re.escape( "hostdependencies configuration is incorrect!" 
)) assert len(self.configuration_errors) == 3 assert len(self.configuration_warnings) == 0 def test_a_s_service_host_up(self): """ Test dependency (checks and notifications) between the service and the host (case 1) 08:00:00 check_host OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host OK HARD 08:02:30 check_service CRITICAL HARD :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct host = self._scheduler.hosts.find_by_name("test_host_00") host.checks_in_progress = [] host.max_check_attempts = 1 host.event_handler_enabled = False svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_00", "test_ok_0") svc.max_check_attempts = 1 svc.checks_in_progress = [] svc.event_handler_enabled = False # notification_interval is in minute, configure to have one per minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert 0 == svc.current_notification_number, 'All OK no notifications' self.assert_actions_count(0) self.assert_checks_count(12) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "OK" == svc.state self.assert_actions_count(0) assert 0 == svc.current_notification_number, 'Critical HARD, but check first host' # previous 10 + 2 checks: 1 for svc in waitdep and 1 scheduled for # test_host_00 (parent/dependent) self.assert_checks_count(12) # Order is not guaranteed... # self.assert_checks_match(10, 'test_hostcheck.pl', 'command') # self.assert_checks_match(10, 'hostname test_host_00', 'command') # self.assert_checks_match(10, 'scheduled', 'status') # self.assert_checks_match(11, 'waitdep', 'status') self.assert_any_check_match('scheduled', 'status') self.assert_any_check_match('wait_dependent', 'status') self.scheduler_loop(1, [[host, 0, 'UP']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "CRITICAL" == svc.state assert 1 == svc.current_notification_number, 'Critical HARD' self.assert_actions_count(2) self.assert_actions_match(0, 'notifier.pl --hostname test_host_00 --servicedesc test_ok_0 ' '--notificationtype PROBLEM --servicestate CRITICAL ' '--serviceoutput CRITICAL', 'command') self.assert_actions_match(0, 'NOTIFICATIONTYPE=PROBLEM, NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, NOTIFICATIONAUTHOR=n/a, ' 'NOTIFICATIONAUTHORNAME=n/a, NOTIFICATIONAUTHORALIAS=n/a, ' 'NOTIFICATIONCOMMENT=n/a, HOSTNOTIFICATIONNUMBER=1, ' 'SERVICENOTIFICATIONNUMBER=1', 'command') self.assert_actions_match(1, 'VOID', 'command') self.assert_checks_count(12) def test_a_s_service_host_down(self): """ Test dependency (checks and notifications) between the service and the host (case 2) 08:00:00 check_host OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host DOWN HARD 08:02:30 check_service CRITICAL 
HARD :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct host = self._scheduler.hosts.find_by_name("test_host_00") host.checks_in_progress = [] host.max_check_attempts = 1 host.act_depend_of = [] host.event_handler_enabled = False svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_00", "test_ok_0") # To make tests quicker we make notifications send very quickly svc.notification_interval = 0.001 svc.max_check_attempts = 1 svc.checks_in_progress = [] svc.event_handler_enabled = False # notification_interval is in minute, configure to have one per minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert 0 == svc.current_notification_number, 'All OK no notifications' self.assert_actions_count(0) self.show_checks() self.assert_checks_count(12) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "OK" == svc.state self.assert_actions_count(0) assert 0 == svc.current_notification_number, 'Critical HARD, but check first host' # previous 10 + 2 checks: 1 for svc in waitdep and 1 scheduled for # test_host_00 (parent/dependent) self.show_checks() self.assert_checks_count(12) # Order is not guaranteed... 
# self.assert_checks_match(11, 'test_hostcheck.pl', 'command') # self.assert_checks_match(11, 'hostname test_host_00', 'command') # self.assert_checks_match(11, 'scheduled', 'status') # self.assert_checks_match(10, 'waitdep', 'status') self.assert_any_check_match('scheduled', 'status') self.assert_any_check_match('wait_dependent', 'status') self.scheduler_loop(1, [[host, 2, 'DOWN']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "DOWN" == host.state assert "HARD" == svc.state_type assert "UNREACHABLE" == svc.state assert 0 == svc.current_notification_number, 'No notif, unreachable HARD' assert 1 == host.current_notification_number, '1 notif, down HARD' self.assert_actions_count(1) self.assert_actions_match( 0, '--hostname test_host_00 --notificationtype PROBLEM --hoststate DOWN', 'command') self.assert_checks_count(12) # test service keep in UNREACHABLE self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UNREACHABLE" == svc.state def test_a_s_host_host(self): """ Test the dependency between 2 hosts 08:00:00 check_host OK HARD 08:01:30 check_host (CRITICAL) => router check planned 08:02:30 check_router OK HARD 08:02:30 check_host CRITICAL HARD :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct host_00 = self._scheduler.hosts.find_by_name("test_host_00") host_00.checks_in_progress = [] host_00.max_check_attempts = 1 host_00.event_handler_enabled = False router_00 = self._scheduler.hosts.find_by_name("test_router_00") router_00.checks_in_progress = [] router_00.max_check_attempts = 1 router_00.event_handler_enabled = False # # notification_interval is in minute, configure to have one per minute # svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime self.scheduler_loop(1, [[host_00, 0, 'UP'], [router_00, 0, 'UP']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) self.assert_actions_count(0) self.assert_checks_count(12) self.scheduler_loop(1, [[host_00, 2, 'DOWN']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == host_00.state assert "UP" == router_00.state self.assert_actions_count(0) self.assert_checks_count(12) # self.assert_checks_match(10, 'test_hostcheck.pl', 'command') # self.assert_checks_match(10, 'hostname test_host_00', 'command') # self.assert_checks_match(10, 'wait_dependent', 'status') # self.assert_checks_match(11, 'scheduled', 'status') self.scheduler_loop(1, [[router_00, 0, 'UP']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "DOWN" == host_00.state assert "UP" == router_00.state assert 1 == host_00.current_notification_number, 'Critical HARD' self.assert_actions_count(1) self.assert_actions_match(0, 'hostname test_host_00', 'command') self.assert_checks_count(12) def test_a_m_service_host_host_up(self): """ Test the dependencies between service -> host -> host 08:00:00 check_host OK HARD 08:00:00 check_router OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host (CRITICAL HARD) => router check planned 08:02:30 check_router UP HARD 08:02:30 check_host CRITICAL HARD 08:02:30 check_service CRITICAL HARD :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct router_00 = self._scheduler.hosts.find_by_name("test_router_00") router_00.checks_in_progress = [] router_00.max_check_attempts = 1 router_00.event_handler_enabled = False host = self._scheduler.hosts.find_by_name("test_host_00") host.checks_in_progress = [] host.max_check_attempts = 1 host.event_handler_enabled = False svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_00", "test_ok_0") svc.checks_in_progress = [] svc.max_check_attempts = 1 svc.event_handler_enabled = False # notification_interval is in minute, configure to have one per 
minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # Host is UP self.scheduler_loop( 1, [[router_00, 0, 'UP'], [host, 0, 'UP'], [svc, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == router_00.state assert "UP" == host.state assert "OK" == svc.state assert 0 == svc.current_notification_number, 'All OK no notifications' assert 0 == host.current_notification_number, 'All OK no notifications' self.assert_actions_count(0) self.assert_checks_count(12) # Service is CRITICAL print("====================== svc CRITICAL ===================") self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == router_00.state assert "UP" == host.state assert "OK" == svc.state assert 0 == svc.current_notification_number, 'No notifications' self.assert_actions_count(0) # New host check self.assert_checks_count(12) self.show_checks() # Host is DOWN print("====================== host DOWN ===================") self.scheduler_loop(1, [[host, 2, 'DOWN']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == router_00.state assert "UP" == host.state assert "OK" == svc.state assert 0 == svc.current_notification_number, 'No notifications' assert 0 == host.current_notification_number, 'No notifications' self.assert_actions_count(0) self.assert_checks_count(12) self.show_checks() # Router is UP print("====================== router UP ===================") self.scheduler_loop(1, [[router_00, 0, 'UP']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) self.scheduler_loop(1) self.show_checks() assert "UP" == router_00.state assert "DOWN" == host.state assert "UNREACHABLE" == svc.state assert 0 == svc.current_notification_number, 'No notifications' assert 1 == host.current_notification_number, '1 host notification' # Re-scheduled 3 checks self.assert_checks_count(12) self.show_checks() self.assert_actions_count(1) self.show_actions() self.assert_actions_match(0, 'notifier.pl --hostname test_host_00 --notificationtype ' 'PROBLEM --hoststate DOWN', 'command') def test_a_m_service_host_host_critical(self): """ Test the dependencies between service -> host -> host 08:00:00 check_host OK HARD 08:00:00 check_router OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host (CRITICAL HARD) => router check planned 08:02:30 check_router CRITICAL HARD 08:02:30 check_host CRITICAL HARD 08:02:30 check_service CRITICAL HARD :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) self.show_logs() # 4 hosts: # test_router_0 # test_host_0 # test_host_00 # test_host_11 assert self.conf_is_correct router_00 = self._scheduler.hosts.find_by_name("test_router_00") router_00.checks_in_progress = [] router_00.max_check_attempts = 1 router_00.event_handler_enabled = False host = 
self._scheduler.hosts.find_by_name("test_host_00") host.checks_in_progress = [] host.max_check_attempts = 1 host.event_handler_enabled = False svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_00", "test_ok_0") svc.checks_in_progress = [] svc.max_check_attempts = 1 svc.event_handler_enabled = False # notification_interval is in minute, configure to have one per minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # Host router is UP self.scheduler_loop( 1, [[router_00, 0, 'UP'], [host, 0, 'UP'], [svc, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == router_00.state assert "UP" == host.state assert "OK" == svc.state assert 0 == svc.current_notification_number, 'All OK no notifications' assert 0 == host.current_notification_number, 'All OK no notifications' self.assert_actions_count(0) # 9 checks: # 2 hosts # 7 services, but not our checked service ! Before we check the hosts dependencies ! # 3 more checks because of the time warp! self.show_checks() self.assert_checks_count(12) # Host service is CRITICAL print("====================== svc CRITICAL ===================") self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == router_00.state assert "UP" == host.state # The service remains OK, despite the critical, because we raise dependencies checks assert "OK" == svc.state # assert "SOFT" == svc.state_type assert 0 == svc.current_notification_number, 'No notifications' self.assert_actions_count(0) # Some more checks # test_host_0 and test_router_0 # The service itself is now checked self.assert_checks_count(12) self.show_checks() # Host is DOWN print("====================== host DOWN ===================") self.scheduler_loop(1, [[host, 2, 'DOWN']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == router_00.state # The host remains UP because we need to check the router dependency assert "UP" == host.state assert "OK" == svc.state assert 0 == svc.current_notification_number, 'No notifications' assert 0 == host.current_notification_number, 'No notifications' assert 0 == router_00.current_notification_number, 'No notifications' self.assert_actions_count(0) # Still the same checks count self.assert_checks_count(12) self.show_checks() # Router is now DOWN print("====================== router DOWN ===================") self.scheduler_loop(1, [[router_00, 2, 'DOWN']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) self.show_checks() assert "DOWN" == router_00.state assert "UNREACHABLE" == host.state assert "UNREACHABLE" == svc.state assert 0 == svc.current_notification_number, 'No notifications' assert 0 == host.current_notification_number, 'No notification' assert 1 == router_00.current_notification_number, '1 host notifications' # Re-scheduled 3 checks self.assert_checks_count(12) self.show_checks() self.assert_actions_count(1) self.show_actions() self.assert_actions_match( 0, 'notifier.pl --hostname test_router_00 --notificationtype PROBLEM --hoststate DOWN', 'command') def test_a_m_services(self): """ Test when multiple services dependency the host :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct host = self._scheduler.hosts.find_by_name("test_host_00") host.checks_in_progress = [] host.max_check_attempts = 1 host.event_handler_enabled = False print("%s depends of %s" % (host.get_full_name(), host.act_depend_of)) svc1 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_00", "test_ok_0") # To make tests quicker we make notifications send very quickly svc1.notification_interval = 20 svc1.checks_in_progress = [] svc1.max_check_attempts = 1 svc1.event_handler_enabled = False print("%s depends of %s" % (svc1.get_full_name(), svc1.act_depend_of)) svc2 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_00", "test_ok_1") # To make tests quicker we make notifications send very quickly svc2.notification_interval = 20 svc2.checks_in_progress = [] svc2.max_check_attempts = 1 svc2.event_handler_enabled = False print("%s depends of %s" % (svc2.get_full_name(), svc2.act_depend_of)) # # notification_interval is in minute, configure to have one per minute # svc.notification_interval = 1 # Freeze the time ! 
initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime self.scheduler_loop( 1, [[host, 0, 'UP'], [svc1, 0, 'OK'], [svc2, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) self.scheduler_loop( 1, [[host, 0, 'UP'], [svc1, 0, 'OK'], [svc2, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc1.state_type assert "OK" == svc1.state assert "HARD" == svc2.state_type assert "OK" == svc2.state assert "HARD" == host.state_type assert "UP" == host.state self.assert_actions_count(0) self.assert_checks_count(12) print("====================== svc1 && svc2 CRITICAL ===================") self.scheduler_loop( 1, [[svc1, 2, 'CRITICAL'], [svc2, 2, 'CRITICAL']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) self.assert_actions_count(0) self.assert_checks_count(12) assert "UP" == host.state assert "OK" == svc1.state assert "OK" == svc2.state print("====================== host UP ===================") self.scheduler_loop(1, [[host, 0, 'UP']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == host.state assert "CRITICAL" == svc1.state assert "CRITICAL" == svc2.state self.show_actions() assert 0 == host.current_notification_number, 'No notifications' assert 1 == svc1.current_notification_number, '1 notification' assert 1 == svc2.current_notification_number, '1 notification' self.assert_actions_count(4) # Both services have a notification self.assert_actions_match(-1, 'notifier.pl --hostname test_host_00 --servicedesc test_ok_0 ' '--notificationtype PROBLEM --servicestate CRITICAL ' '--serviceoutput CRITICAL', 'command') self.assert_actions_match(-1, 'NOTIFICATIONTYPE=PROBLEM, NOTIFICATIONRECIPIENTS=test_contact', 'command') self.assert_actions_match(-1, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1', 'command') self.assert_actions_match(-1, 'notifier.pl --hostname test_host_00 --servicedesc test_ok_1 ' '--notificationtype PROBLEM --servicestate CRITICAL ' '--serviceoutput CRITICAL', 'command') self.assert_actions_match(-1, 'NOTIFICATIONTYPE=PROBLEM, NOTIFICATIONRECIPIENTS=test_contact', 'command') self.assert_actions_match(-1, 'HOSTNOTIFICATIONNUMBER=1, SERVICENOTIFICATIONNUMBER=1', 'command') self.assert_actions_match(2, 'VOID', 'command') self.assert_actions_match(3, 'VOID', 'command') def test_p_s_service_not_check_passive_host(self): """ Test passive service critical not check the dependent host (passive) :return: None """ self.setup_with_file('cfg/cfg_dependencies.cfg', dispatching=True) assert self.conf_is_correct self._scheduler.update_recurrent_works_tick( {'tick_check_freshness': 1}) host = self._scheduler.hosts.find_by_name("test_host_E") svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_E", "test_ok_0") # Max attempts is 2 for this host assert host.max_check_attempts == 2 # Max attempts is 3 for this service assert svc.max_check_attempts == 3 assert 0 == len(svc.act_depend_of) # 
notification_interval is in minute, configure to have one per minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # Set host and service as OK excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_E;0;Host is UP' % time.time() self._scheduler.run_external_commands([excmd]) excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_E;test_ok_0;0;Service is OK' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == host.state assert "HARD" == host.state_type assert "OK" == svc.state assert "HARD" == svc.state_type self.assert_actions_count(0) # Set host DOWN excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_E;2;Host is DOWN' % time.time() self._scheduler.run_external_commands([excmd]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "DOWN" == host.state # SOFT state type on 1st attempt assert "SOFT" == host.state_type self.assert_actions_count(0) # Set host DOWN excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_E;2;Host is DOWN' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "DOWN" == host.state # HARD state type on 2nd attempt assert "HARD" == host.state_type # and an action is raised (PROBLEM notification) self.assert_actions_count(1) # Set host UP excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_E;0;Host is UP' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == host.state assert "HARD" == host.state_type self.assert_actions_count(2) excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_E;test_ok_0;2;Service is CRITICAL' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert "CRITICAL" == svc.state assert "SOFT" == svc.state_type self.assert_actions_count(2) excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_E;test_ok_0;2;Service is CRITICAL' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert "CRITICAL" == svc.state assert "SOFT" == svc.state_type self.assert_actions_count(2) excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_E;test_ok_0;2;Service is CRITICAL' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert "CRITICAL" == svc.state # Need 3 attempts for the HARD state assert "HARD" == svc.state_type self.assert_actions_count(3) self.assert_checks_count(12) def test_ap_s_passive_service_check_active_host(self): """ Test passive service critical check the dependent host (active) :return: None """ self.setup_with_file('cfg/cfg_dependencies_conf.cfg', dispatching=True) assert self.conf_is_correct self._scheduler.update_recurrent_works_tick( {'tick_check_freshness': 1}) host = self._scheduler.hosts.find_by_name("host_A") svc = 
self._scheduler.services.find_srv_by_name_and_hostname( "host_A", "service_P") assert 1 == len(svc.act_depend_of) # notification_interval is in minute, configure to have one per minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime self.scheduler_loop(1, [[host, 0, 'UP']]) excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;host_A;service_P;0;Service is OK' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == host.state assert "OK" == svc.state excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;host_A;service_P;2;Service is CRITICAL' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == host.state assert "OK" == svc.state self.assert_actions_count(0) self.assert_checks_count(11) self.assert_checks_match(10, 'wait_dependent', 'status') self.scheduler_loop(1, [[host, 2, 'DOWN']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "DOWN" == host.state assert "UNREACHABLE" == svc.state def test_c_h_hostdep_withno_depname(self): """ Test for host dependency dispatched on all hosts of an hostgroup 1st solution: define a specific property 2nd solution: define an hostgroup_name and do not define a dependent_hostgroup_name :return: """ self.setup_with_file( 'cfg/dependencies/hostdep_through_hostgroup.cfg', dispatching=True) assert self.conf_is_correct host0 = self._scheduler.hosts.find_by_name("test_host_0") assert host0 is not None host1 = self._scheduler.hosts.find_by_name("test_host_1") assert host1 is not None # Should got a link between host1 and host0 + link between host1 and router assert len(host1.act_depend_of) == 2 l = host1.act_depend_of[0] h = l[0] # the host that host1 depend on assert host0.uuid == h def test_c_h_explodehostgroup(self): """ Test for service dependencies dispatched on all hosts of an hostgroup 1st solution: define a specific property 2nd solution: define an hostgroup_name and do not define a dependent_hostgroup_name :return: """ self.setup_with_file( 'cfg/dependencies/servicedependency_explode_hostgroup.cfg', dispatching=True) self.show_logs() assert self.conf_is_correct # First version: explode_hostgroup property defined svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_router_0", "SNMP" ) assert len(svc.act_depend_of_me) == 2 dependent_services = [] for service in svc.act_depend_of_me: dependent_services.append(service[0]) service_dependencies = [] service_dependency_postfix = self._scheduler.services.\ find_srv_by_name_and_hostname("test_router_0", "POSTFIX") service_dependencies.append(service_dependency_postfix.uuid) service_dependency_cpu = self._scheduler.services.\ find_srv_by_name_and_hostname("test_router_0", "CPU") service_dependencies.append(service_dependency_cpu.uuid) assert set(service_dependencies) == set(dependent_services) # 
Second version: hostgroup_name and no dependent_hostgroup_name property defined svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_router_0", "SNMP" ) assert len(svc.act_depend_of_me) == 2 dependent_services = [] for service in svc.act_depend_of_me: dependent_services.append(service[0]) service_dependencies = [] service_dependency_postfix = self._scheduler.services.\ find_srv_by_name_and_hostname("test_router_0", "POSTFIX") service_dependencies.append(service_dependency_postfix.uuid) service_dependency_cpu = self._scheduler.services.\ find_srv_by_name_and_hostname("test_router_0", "CPU") service_dependencies.append(service_dependency_cpu.uuid) assert set(service_dependencies) == set(dependent_services) def test_c_h_implicithostgroups(self): """ All hosts in the hostgroup get the service dependencies. An host in the group can have its own services dependencies :return: """ self.setup_with_file('cfg/dependencies/servicedependency_implicit_hostgroup.cfg', dispatching=True) assert self.conf_is_correct # Services on host_0 svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") assert svc is not None svc_snmp = self._scheduler.services.\ find_srv_by_name_and_hostname("test_host_0", "SNMP") assert svc_snmp is not None svc_postfix = self._scheduler.services.\ find_srv_by_name_and_hostname("test_host_0", "POSTFIX") assert svc_postfix is not None svc_cpu = self._scheduler.services.\ find_srv_by_name_and_hostname("test_host_0", "CPU") assert svc_cpu is not None # Service on router_0 svc_snmp2 = self._scheduler.services.\ find_srv_by_name_and_hostname("test_router_0", "SNMP") assert svc_snmp2 is not None svc_postfix2 = self._scheduler.services.\ find_srv_by_name_and_hostname("test_router_0", "POSTFIX") assert svc_postfix2 is not None # SNMP on the host is in the dependencies of POSTFIX of the host assert svc_snmp.uuid in [c[0] for c in svc_postfix.act_depend_of] # SNMP on the router is in the dependencies of POSTFIX of the 
router assert svc_snmp2.uuid in [c[0] for c in svc_postfix2.act_depend_of] # host_0 also has its SSH services and dependencies ... svc_postfix = self._scheduler.services.\ find_srv_by_name_and_hostname("test_host_0", "POSTFIX_BYSSH") assert svc_postfix is not None svc_ssh = self._scheduler.services.\ find_srv_by_name_and_hostname("test_host_0", "SSH") assert svc_ssh is not None svc_cpu = self._scheduler.services.\ find_srv_by_name_and_hostname("test_host_0", "CPU_BYSSH") assert svc_cpu is not None assert svc_ssh.uuid in [c[0] for c in svc_postfix.act_depend_of] assert svc_ssh.uuid in [c[0] for c in svc_cpu.act_depend_of] @pytest.mark.skip("Looks broken ... but it is a very non frequent use case. " "Ignoring as of now!") def test_complex_servicedependency(self): """ All hosts in the hosts group get the service dependencies. An host in the group can have its own services dependencies :return: """ self.setup_with_file( 'cfg/dependencies/servicedependency_complex.cfg', dispatching=True) assert self.conf_is_correct for host in self._scheduler.hosts: print("Host: %s" % host) print("- parents: %s" % host.parent_dependencies) # print("- I depend of them: %s" % host.act_depend_of) for uuid in host.parent_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) print("- children: %s" % host.child_dependencies) # print("- They depend of me: %s" % host.act_depend_of_me) for uuid in host.child_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) for svc in self._scheduler.services: print("Service: %s" % svc) print("- parents: %s" % svc.parent_dependencies) # print("- I depend of them: %s" % svc.act_depend_of) for uuid in svc.parent_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) print("- children: %s" % svc.child_dependencies) # print("- They depend of me: %s" % svc.act_depend_of_me) for uuid in svc.child_dependencies: item = self._scheduler.find_item_by_id(uuid) print(" -> %s" % item) 
svc_nrpe = self._scheduler.services.find_srv_by_name_and_hostname( "myspecifichost", "NRPE") assert svc_nrpe is not None print("NRPE: %s\n- %s" % (svc_nrpe, svc_nrpe.act_depend_of)) # Direct service dependency definition is valid ... svc_load = self._scheduler.services.find_srv_by_name_and_hostname( "myspecifichost", "Load") assert svc_load is not None print("Load: %s\n- %s" % (svc_load, svc_load.act_depend_of)) assert svc_nrpe.uuid in [e[0] for e in svc_load.act_depend_of]
class TestDependencies(AlignakTest): ''' This class test dependencies between services, hosts This is how the tests are named: * test_u_<function_name>: unit test for a function * test_c_*: test configuration * test_a_*: test with only active checks * test_p_*: test with only passive checks * test_ap_*: test with both active and passive checks * test_*_s_*: test simple dependencies (2 dependencies) * test_*_m_*: test complex dependencies (> 2 dependencies) * test_*_h_*: test with hostgroups ''' def setUp(self): pass def test_u_is_enable_action_dependent(self): ''' Test the function is_enable_action_dependent in SchedulingItem :return: None ''' pass def test_u_check_and_set_unreachability(self): ''' Test the function check_and_set_unreachability in SchedulingItem :return: None ''' pass def test_c_dependencies_default(self): ''' Test dependencies correctly loaded from config files :return: None ''' pass def test_c_dependencies_default(self): ''' Test dependencies correctly loaded from config files :return: None ''' pass def test_c_host_passive_service_active(self): ''' Test host passive and service active :return: None ''' pass def test_c_host_passive_service_passive(self): ''' Test host passive and service passive :return: None ''' pass def test_c_host_active_service_passive(self): ''' Test host active and service passive :return: None ''' pass def test_c_host_active_on_host_passive(self): ''' Test host active on host active :return: None ''' pass def test_c_host_passive_on_host_active(self): ''' Test host passive on host active :return: None ''' pass def test_c_host_passive_on_host_passive(self): ''' Test host passive on host passive :return: None ''' pass def test_c_options_x(self): ''' Test conf for 'x' (UNREACHABLE) in act_depend_of :return: ''' pass def test_conf_not_correct_1(self): ''' Test that the arbiter raises an error when there is orphan dependency in config files in hostdependency, dependent_host_name is unknown :return: None ''' pass def 
test_conf_not_correct_2(self): ''' Test that the arbiter raises an error when we have an orphan dependency in config files in hostdependency, host_name unknown :return: None ''' pass def test_conf_not_correct_3(self): ''' Test that the arbiter raises an error when we have an orphan dependency in config files in host definition, the parent is unknown :return: None ''' pass def test_conf_not_correct_4(self): ''' Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, dependent_service_description is unknown :return: None ''' pass def test_conf_not_correct_5(self): ''' Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, dependent_host_name is unknown :return: None ''' pass def test_conf_not_correct_6(self): ''' Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, host_name unknown :return: None ''' pass def test_conf_not_correct_7(self): ''' Test that the arbiter raises an error when there is orphan dependency in config files in servicedependency, service_description unknown :return: None ''' pass def test_conf_not_correct_8(self): ''' Test that the arbiter raises an error when there is orphan dependency in config files in hostdependency, dependent_hostgroup_name is unknown :return: None ''' pass def test_a_s_service_host_up(self): ''' Test dependency (checks and notifications) between the service and the host (case 1) 08:00:00 check_host OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host OK HARD 08:02:30 check_service CRITICAL HARD :return: None ''' pass def test_a_s_service_host_down(self): ''' Test dependency (checks and notifications) between the service and the host (case 2) 08:00:00 check_host OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host DOWN HARD 08:02:30 check_service CRITICAL HARD :return: None ''' pass def 
test_a_s_host_host(self): ''' Test the dependency between 2 hosts 08:00:00 check_host OK HARD 08:01:30 check_host (CRITICAL) => router check planned 08:02:30 check_router OK HARD 08:02:30 check_host CRITICAL HARD :return: None ''' pass def test_a_m_service_host_host_up(self): ''' Test the dependencies between service -> host -> host 08:00:00 check_host OK HARD 08:00:00 check_router OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host (CRITICAL HARD) => router check planned 08:02:30 check_router UP HARD 08:02:30 check_host CRITICAL HARD 08:02:30 check_service CRITICAL HARD :return: None ''' pass def test_a_m_service_host_host_critical(self): ''' Test the dependencies between service -> host -> host 08:00:00 check_host OK HARD 08:00:00 check_router OK HARD 08:01:30 check_service (CRITICAL) => host check planned 08:02:30 check_host (CRITICAL HARD) => router check planned 08:02:30 check_router CRITICAL HARD 08:02:30 check_host CRITICAL HARD 08:02:30 check_service CRITICAL HARD :return: None ''' pass def test_a_m_services(self): ''' Test when multiple services dependency the host :return: None ''' pass def test_p_s_service_not_check_passive_host(self): ''' Test passive service critical not check the dependent host (passive) :return: None ''' pass def test_ap_s_passive_service_check_active_host(self): ''' Test passive service critical check the dependent host (active) :return: None ''' pass def test_c_h_hostdep_withno_depname(self): ''' Test for host dependency dispatched on all hosts of an hostgroup 1st solution: define a specific property 2nd solution: define an hostgroup_name and do not define a dependent_hostgroup_name :return: ''' pass def test_c_h_explodehostgroup(self): ''' Test for service dependencies dispatched on all hosts of an hostgroup 1st solution: define a specific property 2nd solution: define an hostgroup_name and do not define a dependent_hostgroup_name :return: ''' pass def test_c_h_implicithostgroups(self): ''' All 
hosts in the hostgroup get the service dependencies. An host in the group can have its own services dependencies :return: ''' pass @pytest.mark.skip("Looks broken ... but it is a very non frequent use case. " "Ignoring as of now!") def test_complex_servicedependency(self): ''' All hosts in the hosts group get the service dependencies. An host in the group can have its own services dependencies :return: ''' pass
34
32
47
5
32
10
3
0.33
1
5
0
0
32
0
32
87
1,554
202
1,019
146
984
334
897
136
864
20
2
3
97
3,915
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_dispatcher.py
tests.test_dispatcher.TestDispatcher
class TestDispatcher(AlignakTest): """ This class tests the dispatcher (distribute configuration to satellites) """ def setUp(self): """Test starting""" super(TestDispatcher, self).setUp() # Log at DEBUG level self.set_unit_tests_logger_level() def _dispatching(self, env_filename='tests/cfg/dispatcher/simple.ini', loops=3, multi_realms=False): """ Dispatching process: prepare, check, dispatch This function realizes all the dispatching operations: - load a monitoring configuration - prepare the dispatching - dispatch - check the correct dispatching, including: - check the configuration dispatched to the schedulers - check the configuration dispatched to the spare arbiter (if any) - run the check_reachable loop several times if multi_realms is True, the scheduler configuration received are not checked against the arbiter whole configuration. This would be really too complex to assert on this :( Schedulers must have a port number with 7768 (eg. 7768,17768,27768,...) Spare daemons must have a port number with 8770 (eg. 8770,18770,28770,...) :return: None """ args = { 'env_file': env_filename, 'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master' } my_arbiter = Arbiter(**args) my_arbiter.setup_alignak_logger() # Clear logs self.clear_logs() # my_arbiter.load_modules_manager() my_arbiter.load_monitoring_config_file() assert my_arbiter.conf.conf_is_correct is True # logging.getLogger('alignak').setLevel(logging.DEBUG) objects_map = {} for _, _, strclss, _, _ in list(my_arbiter.conf.types_creations.values()): if strclss in ['hostescalations', 'serviceescalations']: continue objects_list = getattr(my_arbiter.conf, strclss, []) objects_map[strclss] = {'count': len( objects_list), 'str': str(objects_list)} # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list)) # Freeze the time ! 
initial_datetime = datetime.datetime.now() with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # #1 - Get a new dispatcher my_dispatcher = Dispatcher( my_arbiter.conf, my_arbiter.link_to_myself) print("*** All daemons WS: %s" % ["%s:%s" % (link.address, link.port) for link in my_dispatcher.all_daemons_links]) assert my_dispatcher.dispatch_ok is False assert my_dispatcher.new_to_dispatch is False assert my_dispatcher.first_dispatch_done is False self.assert_any_log_match( re.escape("Dispatcher arbiters/satellites map:")) for link in my_dispatcher.all_daemons_links: self.assert_any_log_match( re.escape(" - %s: %s" % (link.name, link.uri))) # Simulate the daemons HTTP interface (very simple simulation !) with requests_mock.mock() as mr: for link in my_dispatcher.all_daemons_links: mr.get('http://%s:%s/ping' % (link.address, link.port), json='pong') mr.get('http://%s:%s/identity' % (link.address, link.port), json={"running_id": 123456.123456}) mr.get('http://%s:%s/wait_new_conf' % (link.address, link.port), json=True) mr.get('http://%s:%s/fill_initial_broks' % (link.address, link.port), json=[]) mr.post('http://%s:%s/_push_configuration' % (link.address, link.port), json=True) mr.get('http://%s:%s/managed_configurations' % (link.address, link.port), json={}) mr.get('http://%s:%s/do_not_run' % (link.address, link.port), json=True) for link in my_dispatcher.all_daemons_links: # print("Satellite: %s / %s" % (link, link.cfg_to_manage)) assert not link.hash assert not link.push_flavor assert not link.cfg_to_manage assert not link.cfg_managed # #2 - Initialize connection with all our satellites for satellite in my_dispatcher.all_daemons_links: assert my_arbiter.daemon_connection_init(satellite) # All links have a running identifier for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue assert link.running_id == 123456.123456 self.assert_any_log_match(re.escape("got: 123456.123456")) # #3 - 
Check reachable - a configuration is not yet prepared, # so only check reachable state my_dispatcher.check_reachable() assert my_dispatcher.dispatch_ok is False assert my_dispatcher.first_dispatch_done is False assert my_dispatcher.new_to_dispatch is False self.show_logs() # Not yet configured ... for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue self.assert_any_log_match(re.escape( "The %s %s do not have a configuration" % ( link.type, link.name) )) # #3 - Check reachable - daemons got pinged too early... my_dispatcher.check_reachable() assert my_dispatcher.dispatch_ok is False assert my_dispatcher.first_dispatch_done is False assert my_dispatcher.new_to_dispatch is False # Only for Python > 2.7, DEBUG logs ... if os.sys.version_info > (2, 7): for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue self.assert_any_log_match(re.escape( "Too early to ping %s" % link.name )) self.assert_no_log_match(re.escape( "Dispatcher, these daemons are not configured: " "reactionner-master,poller-master,broker-master,receiver-master," "scheduler-master" ", and a configuration is ready to dispatch, run the dispatching..." )) # Time warp 5 seconds - overpass the ping period... self.clear_logs() frozen_datetime.tick(delta=datetime.timedelta(seconds=5)) # #3 - Check reachable - daemons provide their configuration my_dispatcher.check_reachable() assert my_dispatcher.dispatch_ok is False assert my_dispatcher.first_dispatch_done is False assert my_dispatcher.new_to_dispatch is False # Only for Python > 2.7, DEBUG logs ... if os.sys.version_info > (2, 7): # Still not configured ... 
for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue self.assert_any_log_match(re.escape( "My (%s) fresh managed configuration: {}" % link.name )) # #4 - Prepare dispatching assert my_dispatcher.new_to_dispatch is False my_dispatcher.prepare_dispatch() assert my_dispatcher.dispatch_ok is False assert my_dispatcher.first_dispatch_done is False assert my_dispatcher.new_to_dispatch is True self.assert_any_log_match(re.escape( "All configuration parts are assigned to schedulers and their satellites :)" )) # All links have a hash, push_flavor and cfg_to_manage for link in my_dispatcher.all_daemons_links: print("Link: %s" % link) assert getattr(link, 'hash', None) is not None assert getattr(link, 'push_flavor', None) is not None assert getattr(link, 'cfg_to_manage', None) is not None assert not link.cfg_managed # Not yet # #5 - Check reachable - a configuration is prepared, # this will force the daemons communication, no need for a time warp ;) my_dispatcher.check_reachable() # Only for Python > 2.7, DEBUG logs ... if os.sys.version_info > (2, 7): for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue self.assert_any_log_match(re.escape( "My (%s) fresh managed configuration: {}" % link.name )) self.assert_any_log_match(re.escape( "Dispatcher, these daemons are not configured:")) self.assert_any_log_match(re.escape( ", and a configuration is ready to dispatch, run the dispatching...")) self.assert_any_log_match(re.escape( "Trying to send configuration to the satellites...")) for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue self.assert_any_log_match(re.escape( "Sending configuration to the %s %s" % (link.type, link.name))) # As of now the configuration is prepared and was dispatched to the daemons ! # Configuration already dispatched! 
with pytest.raises(DispatcherError): my_dispatcher.dispatch() self.show_logs() # Hack the requests history to check and simulate the configuration pushed... history = mr.request_history for index, request in enumerate(history): if '_push_configuration' in request.url: received = request.json() print(index, request.url, received) assert ['conf'] == list(received.keys()) conf = received['conf'] from pprint import pprint pprint(conf) assert 'alignak_name' in conf assert conf['alignak_name'] == 'My Alignak' assert 'self_conf' in conf assert conf['self_conf'] i_am = None for link in my_dispatcher.all_daemons_links: if link.type == conf['self_conf']['type'] \ and link.name == conf['self_conf']['name']: i_am = link break else: assert False print(("I am: %s" % i_am)) print(("I have: %s" % conf)) # All links have a hash, push_flavor and cfg_to_manage assert 'hash' in conf assert 'managed_conf_id' in conf assert 'arbiters' in conf if conf['self_conf']['manage_arbiters']: # All the known arbiters assert list(conf['arbiters'].keys()) == [arbiter_link.uuid for arbiter_link in my_dispatcher.arbiters] else: assert conf['arbiters'] == {} assert 'schedulers' in conf # Hack for the managed configurations link.cfg_managed = {} for scheduler_link in list(conf['schedulers'].values()): link.cfg_managed[scheduler_link['instance_id']] = { 'hash': scheduler_link['hash'], 'push_flavor': scheduler_link['push_flavor'], 'managed_conf_id': scheduler_link['managed_conf_id'] } print("Managed: %s" % link.cfg_managed) assert 'modules' in conf assert conf['modules'] == [] # Spare arbiter specific if '8770/_push_configuration' in request.url: # Spare arbiter receives all the monitored configuration assert 'whole_conf' in conf # String serialized configuration assert isinstance(conf['whole_conf'], string_types) managed_conf_part = unserialize(conf['whole_conf']) # Test a property to be sure conf loaded correctly assert managed_conf_part.instance_id == conf['managed_conf_id'] # The spare arbiter got 
the same objects count as the master arbiter prepared! for _, _, strclss, _, _ in list(managed_conf_part.types_creations.values()): # These elements are not included in the serialized configuration! if strclss in ['hostescalations', 'serviceescalations', 'arbiters', 'schedulers', 'brokers', 'pollers', 'reactionners', 'receivers', 'realms', 'modules', 'hostsextinfo', 'servicesextinfo', 'hostdependencies', 'servicedependencies']: continue objects_list = getattr( managed_conf_part, strclss, []) # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list)) # Count and string dup are the same ! assert len( objects_list) == objects_map[strclss]['count'] assert str( objects_list) == objects_map[strclss]['str'] # Scheduler specific elif '7768/_push_configuration' in request.url: assert 'conf_part' in conf # String serialized configuration # assert isinstance(conf['conf_part'], string_types) managed_conf_part = unserialize(conf['conf_part']) # Test a property to be sure conf loaded correctly assert managed_conf_part.instance_id == conf['managed_conf_id'] # Hack for the managed configurations link.cfg_managed = { conf['instance_id']: { 'hash': conf['hash'], 'push_flavor': conf['push_flavor'], 'managed_conf_id': conf['managed_conf_id'] } } print("Managed: %s" % link.cfg_managed) # The scheduler got the same objects count as the arbiter prepared! for _, _, strclss, _, _ in list(managed_conf_part.types_creations.values()): # These elements are not included in the serialized configuration! if strclss in ['hostescalations', 'serviceescalations', 'arbiters', 'schedulers', 'brokers', 'pollers', 'reactionners', 'receivers', 'realms', 'modules', 'hostsextinfo', 'servicesextinfo', 'hostdependencies', 'servicedependencies']: continue objects_list = getattr( managed_conf_part, strclss, []) # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list)) if not multi_realms: # Count and string dump are the same ! 
assert len( objects_list) == objects_map[strclss]['count'] assert str( objects_list) == objects_map[strclss]['str'] else: # Satellites print("I am: ") print(index, request.url, received) assert 'conf_part' not in conf assert 'see_my_schedulers' == conf['managed_conf_id'] for link in my_dispatcher.all_daemons_links: mr.get('http://%s:%s/managed_configurations' % (link.address, link.port), json=link.cfg_managed) print("Check dispatching:") self.clear_logs() # assert my_dispatcher.check_dispatch() is True dispatched = my_dispatcher.check_dispatch() self.show_logs() assert dispatched for loop_count in range(0, loops): for tw in range(0, 4): # Time warp 1 second frozen_datetime.tick( delta=datetime.timedelta(seconds=1)) print("Check reachable %s" % tw) self.clear_logs() my_dispatcher.check_reachable() # Only for Python > 2.7, DEBUG logs ... if os.sys.version_info > (2, 7): for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue self.assert_any_log_match(re.escape( "Too early to ping %s" % (link.name) )) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) print("Check reachable response") self.clear_logs() my_dispatcher.check_reachable() self.show_logs() # Only for Python > 2.7, DEBUG logs ... if os.sys.version_info > (2, 7): for link in my_dispatcher.all_daemons_links: if link == my_dispatcher.arbiter_link: continue self.assert_any_log_match(re.escape( "My (%s) fresh managed configuration: %s" % (link.name, link.cfg_managed) )) def test_bad_init(self): """ Test that: - bad configuration - two master arbiters are not correct and raise an exception! 
:return: None """ args = { 'env_file': os.path.join(self._test_dir, 'cfg/dispatcher/two_master_arbiters.ini'), 'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master' } self.my_arbiter = Arbiter(**args) # Get a new dispatcher - raise an exception with pytest.raises(DispatcherError): Dispatcher(None, self.my_arbiter.link_to_myself) # Get a new dispatcher - raise an exception with pytest.raises(DispatcherError): Dispatcher(self.my_arbiter.conf, None) # Prepare the Alignak configuration # self.my_arbiter.load_modules_manager() self.my_arbiter.load_monitoring_config_file() assert self.my_arbiter.conf.conf_is_correct is True # Get a new dispatcher - raise an exception (two master arbiters) with pytest.raises(DispatcherError): Dispatcher(self.my_arbiter.conf, self.my_arbiter.link_to_myself) def test_dispatching_simple(self): """ Test the dispatching process: simple configuration :return: None """ self._dispatching() def test_dispatching_multiple_schedulers(self): """ Test the dispatching process: 1 realm, 2 schedulers :return: None """ self._dispatching(os.path.join( self._test_dir, 'cfg/dispatcher/simple_multi_schedulers.ini'), multi_realms=True) def test_dispatching_multiple_pollers(self): """ Test the dispatching process: 1 realm, 2 pollers :return: None """ self._dispatching(os.path.join( self._test_dir, 'cfg/dispatcher/simple_multi_pollers.ini')) def test_dispatching_multiple_realms(self): """ Test the dispatching process: 2 realms, all daemons duplicated :return: None """ self._dispatching(os.path.join( self._test_dir, 'cfg/dispatcher/2-realms.ini'), multi_realms=True) def test_dispatching_multiple_realms_sub_realms(self): """ Test the dispatching process: 2 realms, some daemons are sub_realms managers realm All: * 1 scheduler * 1 receiver realm realm2: * 1 receiver * 1 scheduler * 1 poller realm All + realm2 (sub realm): * 1 broker * 1 poller * 1 reactionner realm realm3: * 1 receiver * 1 scheduler * 1 reactionner * 1 broker * 1 poller :return: None """ 
self._dispatching(os.path.join( self._test_dir, 'cfg/dispatcher/realms_with_sub_realms.ini'), multi_realms=True) def test_dispatching_multiple_realms_sub_realms_multi_schedulers(self): """ Test the dispatching process: 2 realms, some daemons are sub_realms managers and we have several schedulers. daemons with (+) are manage_sub_realms=1 realm All (6 hosts): * 2 schedulers (+) realm All / All1 (6 hosts): * 3 schedulers (+) realm All / All1 / All1a (4 hosts): * 2 schedulers (+) :return: None """ self._dispatching(os.path.join(self._test_dir, 'cfg/dispatcher/realms_with_sub_realms_multi_schedulers.ini'), multi_realms=True) @pytest.mark.skip("Currently disabled - spare feature - and whatever this test seems broken!") def test_dispatching_spare_arbiter(self): """ Test the dispatching process: 1 realm, 1 spare arbiter :return: None """ self._dispatching(os.path.join( self._test_dir, 'cfg/dispatcher/spare_arbiter.ini'))
class TestDispatcher(AlignakTest): ''' This class tests the dispatcher (distribute configuration to satellites) ''' def setUp(self): '''Test starting''' pass def _dispatching(self, env_filename='tests/cfg/dispatcher/simple.ini', loops=3, multi_realms=False): ''' Dispatching process: prepare, check, dispatch This function realizes all the dispatching operations: - load a monitoring configuration - prepare the dispatching - dispatch - check the correct dispatching, including: - check the configuration dispatched to the schedulers - check the configuration dispatched to the spare arbiter (if any) - run the check_reachable loop several times if multi_realms is True, the scheduler configuration received are not checked against the arbiter whole configuration. This would be really too complex to assert on this :( Schedulers must have a port number with 7768 (eg. 7768,17768,27768,...) Spare daemons must have a port number with 8770 (eg. 8770,18770,28770,...) :return: None ''' pass def test_bad_init(self): ''' Test that: - bad configuration - two master arbiters are not correct and raise an exception! 
:return: None ''' pass def test_dispatching_simple(self): ''' Test the dispatching process: simple configuration :return: None ''' pass def test_dispatching_multiple_schedulers(self): ''' Test the dispatching process: 1 realm, 2 schedulers :return: None ''' pass def test_dispatching_multiple_pollers(self): ''' Test the dispatching process: 1 realm, 2 pollers :return: None ''' pass def test_dispatching_multiple_realms(self): ''' Test the dispatching process: 2 realms, all daemons duplicated :return: None ''' pass def test_dispatching_multiple_realms_sub_realms(self): ''' Test the dispatching process: 2 realms, some daemons are sub_realms managers realm All: * 1 scheduler * 1 receiver realm realm2: * 1 receiver * 1 scheduler * 1 poller realm All + realm2 (sub realm): * 1 broker * 1 poller * 1 reactionner realm realm3: * 1 receiver * 1 scheduler * 1 reactionner * 1 broker * 1 poller :return: None ''' pass def test_dispatching_multiple_realms_sub_realms_multi_schedulers(self): ''' Test the dispatching process: 2 realms, some daemons are sub_realms managers and we have several schedulers. daemons with (+) are manage_sub_realms=1 realm All (6 hosts): * 2 schedulers (+) realm All / All1 (6 hosts): * 3 schedulers (+) realm All / All1 / All1a (4 hosts): * 2 schedulers (+) :return: None ''' pass @pytest.mark.skip("Currently disabled - spare feature - and whatever this test seems broken!") def test_dispatching_spare_arbiter(self): ''' Test the dispatching process: 1 realm, 1 spare arbiter :return: None ''' pass
12
11
48
7
29
13
5
0.45
1
10
3
0
10
2
10
65
493
76
288
36
275
130
222
32
210
44
2
7
53
3,916
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_external_commands.py
tests.test_external_commands.TestExternalCommands
class TestExternalCommands(AlignakTest): """ This class tests the external commands """ def setUp(self): super(TestExternalCommands, self).setUp() self.setup_with_file('cfg/cfg_external_commands.cfg', dispatching=True) assert self.conf_is_correct # self.show_logs() # No error messages assert len(self.configuration_errors) == 0 # No warning messages assert len(self.configuration_warnings) == 0 # Set / reset as default applyer for external commands self.ecm_mode = 'applyer' def test_basic_external_command(self): """ Basic tests for the external command :return: """ # Some external commands in a list external_commands = [] excmd = ExternalCommand('[%d] ACKNOWLEDGE_HOST_PROBLEM;test_host_fred;2;1;1;admin;' 'Acknowledge requested from WebUI' % time.time()) external_commands.append(excmd) excmd = ExternalCommand('[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_1;unknownservice;' '1;Service is WARNING|rtt=9999;5;10;0;10000' % time.time()) external_commands.append(excmd) # Serialize to send to another daemon print("Commands: %s" % external_commands) for cmd in external_commands: print("- %s" % cmd.__dict__) res = serialize(external_commands, True) print("Serialized: %s" % res) # Unserialize when received by a daemon result = unserialize(res, True) print("Unserialized: %s" % result) for cmd in result: print("- %s" % cmd.__dict__) assert isinstance(cmd, ExternalCommand) assert cmd.__class__.my_type == "externalcommand" def test__command_syntax_receiver(self): self.accept_passive_unknown_check_results = True self.ecm_mode = 'receiver' self._command_syntax() @pytest.mark.skip("Temporarily disable - ext commands refactoring needed!") def test__command_syntax_dispatcher(self): self.accept_passive_unknown_check_results = True self.ecm_mode = 'dispatcher' self._command_syntax() def test__command_syntax_applyer(self): self.accept_passive_unknown_check_results = True self.ecm_mode = 'applyer' self._command_syntax() def _command_syntax(self): """ External command parsing - named as 
test__ to be the first executed test :) :return: None """ now = int(time.time()) # --- # Lowercase command is allowed self.clear_logs() self._main_broker.broks = [] excmd = '[%d] command' % (now) res = self.manage_external_command(excmd) # Resolve command result is None because the command is not recognized assert res is None self.assert_any_log_match( re.escape(u"WARNING: [arbiter-master.alignak.external_command] " u"External command 'command' is not recognized, sorry") ) # --- # Some commands are not implemented self.clear_logs() self._main_broker.broks = [] excmd = '[%d] shutdown_program' % (now) res = self.manage_external_command(excmd) if self.ecm_mode == 'applyer': self.assert_any_log_match( re.escape(u"WARNING: [arbiter-master.alignak.external_command] " u"The external command 'SHUTDOWN_PROGRAM' is not currently " u"implemented in Alignak.") ) else: # Resolve command result is not None because the command is recognized print("Result (mode=%s): %s" % (self.ecm_mode, res)) assert res is not None # --- # Command may not have a timestamp self.clear_logs() self._main_broker.broks = [] excmd = 'shutdown_program' res = self.manage_external_command(excmd) if self.ecm_mode == 'applyer': self.assert_any_log_match( re.escape("WARNING: [arbiter-master.alignak.external_command] The external command " "'SHUTDOWN_PROGRAM' is not currently implemented in Alignak.") ) else: # Resolve command result is not None because the command is recognized print("Result (mode=%s): %s" % (self.ecm_mode, res)) assert res is not None # --- # Timestamp must be an integer self.clear_logs() self._main_broker.broks = [] excmd = '[fake] shutdown_program' res = self.manage_external_command(excmd) # Resolve command result is not None because the command is recognized assert res is None self.assert_any_log_match( re.escape("WARNING: [arbiter-master.alignak.external_command] Malformed command " "'[fake] shutdown_program'") ) # --- # Malformed command self.clear_logs() self._main_broker.broks = [] 
excmd = '[%d] MALFORMED COMMAND' % now res = self.manage_external_command(excmd) assert res is None if self.ecm_mode == 'applyer': expected_logs = [ ('error', "Command '[%s] command' is not recognized, sorry" % now), ('info', 'EXTERNAL COMMAND: [%s] shutdown_program' % now), ('warning', "SHUTDOWN_PROGRAM: this command is not implemented!"), ('info', "EXTERNAL COMMAND: shutdown_program"), ('warning', "SHUTDOWN_PROGRAM: this command is not implemented!"), ('error', "Malformed command: '[fake] shutdown_program'"), ('error', "Malformed command: '[%s] MALFORMED COMMAND'" % now), ] self.check_monitoring_events_log(expected_logs) self.clear_events() # ...and some logs self.assert_any_log_match("Malformed command") self.assert_any_log_match('MALFORMED COMMAND') # self.assert_any_log_match("Malformed command exception: too many values to unpack") # --- # Malformed command self.clear_logs() self._main_broker.broks = [] excmd = '[%d] ADD_HOST_COMMENT;test_host_0;1;qdsqd' % now res = self.manage_external_command(excmd) if self.ecm_mode == 'applyer': expected_logs = [ ('error', "Arguments are not correct for the command: '[%s] ADD_HOST_COMMENT;test_host_0;1;qdsqd'" % now) ] self.check_monitoring_events_log(expected_logs) self.clear_events() # ...and some logs self.assert_any_log_match("Sorry, the arguments for the command") # --- # Unknown command self.clear_logs() self._main_broker.broks = [] excmd = '[%d] UNKNOWN_COMMAND' % now res = self.manage_external_command(excmd) if self.ecm_mode == 'applyer': expected_logs = [ ('error', "Command '[%s] UNKNOWN_COMMAND' is not recognized, sorry" % now) ] self.check_monitoring_events_log(expected_logs) self.clear_events() # ...and some logs self.assert_any_log_match( "External command 'unknown_command' is not recognized, sorry") else: # Resolve command result is not None because the command is recognized print("Result unknown command (mode=%s): %s" % (self.ecm_mode, res)) assert res is None #  --- # External command: unknown host 
self.clear_logs() self._main_broker.broks = [] excmd = '[%d] DISABLE_HOST_CHECK;not_found_host' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() if self.ecm_mode == 'applyer': # No log when receiving for an unknown host ! expected_logs = [ ] self.check_monitoring_events_log(expected_logs) self.clear_events() self.show_logs() # ...and a warning log! if not self.accept_passive_unknown_check_results: self.assert_any_log_match("A command was received for the host 'not_found_host', " "but the host could not be found!") else: # Resolve command result is not None because the command is recognized print("Result host check command (mode=%s): %s" % (self.ecm_mode, res)) assert res is None # Now test different types of commands # ----- #  Get an host... host = self._scheduler.hosts.find_by_name("test_host_0") assert host is not None #  Get a service... svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") assert svc.customs is not None #  and a contact... 
contact = self._scheduler.contacts[host.contacts[0]] assert contact is not None assert contact.contact_name == "test_contact" #  --- # A global host command assert self._scheduler.external_commands_manager.my_conf.check_host_freshness now = time.time() excmd = '[%d] DISABLE_HOST_FRESHNESS_CHECKS' % now res = self.manage_external_command(excmd) print("Result (mode=%s): %s" % (self.ecm_mode, res)) if self.ecm_mode == 'applyer': # Command is supposed to be managed assert res is None else: # Command is to be managed by another daemon assert res == { 'cmd': '[%d] DISABLE_HOST_FRESHNESS_CHECKS' % now, 'global': True} #  --- # A specific host command assert host.notifications_enabled assert svc.notifications_enabled self.clear_logs() self._main_broker.broks = [] excmd = '[%d] DISABLE_HOST_NOTIFICATIONS;test_host_0' % time.time() res = self.manage_external_command(excmd) print("Result (mode=%s): %s" % (self.ecm_mode, res)) self.show_logs() # Command is supposed to be managed assert res is None def test_several_commands(self): """ External command management - several commands at once :return: None """ now = int(time.time()) # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # Unknown command excmds = [] excmds.append('[%d] DISABLE_EVENT_HANDLERS' % time.time()) excmds.append('[%d] ENABLE_EVENT_HANDLERS' % time.time()) # Call the scheduler method to run several commands at once self._scheduler.run_external_commands(excmds) self.external_command_loop() # We get an 'monitoring_log' brok for logging to the monitoring logs... # broks = [b for b in self._main_broker.broks # if b.type == 'monitoring_log'] # assert len(broks) == 2 expected_logs = [ ('info', 'EXTERNAL COMMAND: [%s] DISABLE_EVENT_HANDLERS' % now), ('info', 'EXTERNAL COMMAND: [%s] ENABLE_EVENT_HANDLERS' % now) ] self.check_monitoring_events_log(expected_logs) def test_change_and_reset_host_modattr(self): """ Change and reset modified attributes for an host :return: None """ #  An host... 
host = self._scheduler.hosts.find_by_name("test_host_0") #  --- # External command: change host attribute excmd = '[%d] CHANGE_HOST_MODATTR;test_host_0;1' % time.time() self.manage_external_command(excmd) # Notifications are now disabled assert not getattr( host, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) assert 1 == host.modified_attributes # External command: change host attribute excmd = '[%d] CHANGE_HOST_MODATTR;test_host_0;1' % time.time() self.manage_external_command(excmd) # Notifications are now enabled assert getattr( host, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) assert 0 == host.modified_attributes #  --- # External command: change host attribute (non boolean attribute) excmd = '[%d] CHANGE_HOST_MODATTR;test_host_0;65536' % time.time() self.manage_external_command(excmd) # Notifications are now disabled assert 65536 == host.modified_attributes # External command: change host attribute excmd = '[%d] CHANGE_HOST_MODATTR;test_host_0;65536' % time.time() self.manage_external_command(excmd) # Notifications are now enabled assert 0 == host.modified_attributes #  --- # External command: change host attribute (several attributes in one command) excmd = '[%d] CHANGE_HOST_MODATTR;test_host_0;3' % time.time() self.manage_external_command(excmd) # Notifications are now disabled assert not getattr( host, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) # Active checks are now disabled assert not getattr( host, DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].attribute) assert 3 == host.modified_attributes # External command: change host attribute (several attributes in one command) excmd = '[%d] CHANGE_HOST_MODATTR;test_host_0;3' % time.time() self.manage_external_command(excmd) # Notifications are now enabled assert getattr( host, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) # Active checks are now enabled assert getattr( host, DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].attribute) assert 0 == host.modified_attributes 
def test_change_and_reset_service_modattr(self): """ Change and reset modified attributes for a service :return: None """ #  A service... svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") #  --- # External command: change service attribute excmd = '[%d] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;1' % time.time() self.manage_external_command(excmd) # Notifications are now disabled assert not getattr( svc, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) assert 1 == svc.modified_attributes # External command: change service attribute excmd = '[%d] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;1' % time.time() self.manage_external_command(excmd) # Notifications are now enabled assert getattr( svc, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) assert 0 == svc.modified_attributes #  --- # External command: change service attribute (non boolean attribute) excmd = '[%d] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;65536' % time.time() self.manage_external_command(excmd) # Notifications are now disabled assert 65536 == svc.modified_attributes # External command: change service attribute excmd = '[%d] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;65536' % time.time() self.manage_external_command(excmd) # Notifications are now enabled assert 0 == svc.modified_attributes #  --- # External command: change service attribute (several attributes in one command) excmd = '[%d] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;3' % time.time() self.manage_external_command(excmd) # Notifications are now disabled assert not getattr( svc, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) # Active checks are now disabled assert not getattr( svc, DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].attribute) assert 3 == svc.modified_attributes # External command: change service attribute (several attributes in one command) excmd = '[%d] CHANGE_SVC_MODATTR;test_host_0;test_ok_0;3' % time.time() self.manage_external_command(excmd) # Notifications are now 
enabled assert getattr( svc, DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].attribute) # Active checks are now enabled assert getattr( svc, DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].attribute) assert 0 == svc.modified_attributes def test_change_and_reset_contact_modattr(self): """ Change an Noned reset modified attributes for a contact :return: None """ #  A contact... host = self._scheduler.hosts.find_by_name("test_host_0") contact = self._scheduler.contacts[host.contacts[0]] assert contact is not None assert contact.contact_name == "test_contact" #  --- # External command: change contact attribute excmd = '[%d] CHANGE_CONTACT_MODATTR;test_contact;1' % time.time() self.manage_external_command(excmd) assert 1 == contact.modified_attributes # External command: change contact attribute excmd = '[%d] CHANGE_CONTACT_MODATTR;test_contact;1' % time.time() self.manage_external_command(excmd) # No toggle assert 1 == contact.modified_attributes #  --- # External command: change contact attribute assert 0 == contact.modified_host_attributes excmd = '[%d] CHANGE_CONTACT_MODHATTR;test_contact;1' % time.time() self.manage_external_command(excmd) assert 1 == contact.modified_host_attributes # External command: change contact attribute excmd = '[%d] CHANGE_CONTACT_MODHATTR;test_contact;1' % time.time() self.manage_external_command(excmd) # No toggle assert 1 == contact.modified_host_attributes #  --- # External command: change contact attribute assert 0 == contact.modified_service_attributes excmd = '[%d] CHANGE_CONTACT_MODSATTR;test_contact;1' % time.time() self.manage_external_command(excmd) assert 1 == contact.modified_service_attributes # External command: change contact attribute excmd = '[%d] CHANGE_CONTACT_MODSATTR;test_contact;1' % time.time() self.manage_external_command(excmd) # No toggle assert 1 == contact.modified_service_attributes # Note that the value is simply stored and not controled in any way ... 
    def test_change_host_attributes(self):
        """ Change host attributes with external commands
        (check command, check/notification periods, event handler, snapshot
        command, check attempts/intervals, custom variables, notification
        delay) and verify the matching MODATTR bit is raised each time.

        :return: None
        """
        # A TP...
        tp = self._scheduler.timeperiods.find_by_name("24x7")
        assert tp.timeperiod_name == "24x7"
        tp2 = self._scheduler.timeperiods.find_by_name("none")
        assert tp2.timeperiod_name == "none"

        # A command...
        command = self._scheduler.commands.find_by_name("check-host-alive")
        assert command.command_name == "check-host-alive"
        command2 = self._scheduler.commands.find_by_name(
            "check-host-alive-parent")
        assert command2.command_name == "check-host-alive-parent"

        # An host...
        host = self._scheduler.hosts.find_by_name("test_host_0")
        assert host.customs is not None
        assert host.get_check_command() == "check-host-alive-parent"
        # assert host.get_check_command() == \
        #     "check-host-alive-parent!up!$HOSTSTATE:test_router_0$"
        assert host.customs['_OSLICENSE'] == 'gpl'
        assert host.customs['_OSTYPE'] == 'gnulinux'
        # Todo: check if it is normal ... host.check_period is the TP uuid and not an object!
        assert host.check_period == tp.uuid

        # A contact...
        contact = self._scheduler.contacts[host.contacts[0]]
        assert contact is not None
        assert contact.contact_name == "test_contact"
        # Todo: check if it is normal ... contact.host_notification_period is the TP name
        # and not an object!
        assert contact.host_notification_period == tp.timeperiod_name
        assert contact.service_notification_period == tp.timeperiod_name

        # ---
        # External command: change check command
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_HOST_CHECK_COMMAND;test_host_0;check-host-alive' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert host.get_check_command() == "check-host-alive"
        # 512 = MODATTR_CHECK_COMMAND
        assert 512 == host.modified_attributes

        # ---
        # External command: change check period
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_HOST_CHECK_TIMEPERIOD;test_host_0;none' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Todo: now, check period is an object and no more a TP uuid!
        assert host.check_period == tp2
        # 16384 = MODATTR_CHECK_TIMEPERIOD
        assert 16384 == host.modified_attributes

        # ---
        # External command: change event handler
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_HOST_EVENT_HANDLER;test_host_0;check-host-alive' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # NOTE(review): this asserts on the check command, not the event
        # handler - looks like a copy/paste; confirm against get_event_handler()
        assert host.get_check_command() == "check-host-alive"
        # 256 = MODATTR_EVENT_HANDLER_COMMAND
        assert 256 == host.modified_attributes

        # ---
        # External command: change snapshot command
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_HOST_SNAPSHOT_COMMAND;test_host_0;check-host-alive' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert host.get_snapshot_command() == "check-host-alive"
        assert 256 == host.modified_attributes

        # ---
        # External command: max host check attempts
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_MAX_HOST_CHECK_ATTEMPTS;test_host_0;5' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert getattr(
            host, DICT_MODATTR["MODATTR_MAX_CHECK_ATTEMPTS"].attribute) == 5
        assert 4096 == host.modified_attributes

        # ---
        # External command: normal host check interval
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_NORMAL_HOST_CHECK_INTERVAL;test_host_0;21' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert getattr(
            host, DICT_MODATTR["MODATTR_NORMAL_CHECK_INTERVAL"].attribute) == 21
        assert 1024 == host.modified_attributes

        # ---
        # External command: retry host check interval
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_RETRY_HOST_CHECK_INTERVAL;test_host_0;42' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert getattr(
            host, DICT_MODATTR["MODATTR_RETRY_CHECK_INTERVAL"].attribute) == 42
        assert 2048 == host.modified_attributes

        # ---
        # External command: change host custom var - undefined variable
        host.modified_attributes = 0
        # Not existing
        assert '_UNDEFINED' not in host.customs
        excmd = '[%d] CHANGE_CUSTOM_HOST_VAR;test_host_0;_UNDEFINED;other' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Still not existing - an unknown variable is not created
        assert '_UNDEFINED' not in host.customs
        assert 0 == host.modified_attributes

        # External command: change host custom var
        host.modified_attributes = 0
        excmd = '[%d] CHANGE_CUSTOM_HOST_VAR;test_host_0;_OSLICENSE;other' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert host.customs['_OSLICENSE'] == 'other'
        # 32768 = MODATTR_CUSTOM_VARIABLE
        assert 32768 == host.modified_attributes

        # ---
        # External command: delay host first notification
        host.modified_attributes = 0
        assert host.first_notification_delay == 0
        excmd = '[%d] DELAY_HOST_NOTIFICATION;test_host_0;10' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert host.first_notification_delay == 10

    def test_change_service_attributes(self):
        """Change service attributes with external commands and verify the
        matching MODATTR bit is raised each time.

        :return: None
        """
        # A TP...
        tp = self._scheduler.timeperiods.find_by_name("24x7")
        assert tp.timeperiod_name == "24x7"
        tp2 = self._scheduler.timeperiods.find_by_name("none")
        assert tp2.timeperiod_name == "none"

        # A command...
        command = self._scheduler.commands.find_by_name("check-host-alive")
        assert command.command_name == "check-host-alive"
        command2 = self._scheduler.commands.find_by_name(
            "check-host-alive-parent")
        assert command2.command_name == "check-host-alive-parent"

        # An host...
        host = self._scheduler.hosts.find_by_name("test_host_0")
        assert host.customs is not None
        assert host.get_check_command() == "check-host-alive-parent"
        assert host.customs['_OSLICENSE'] == 'gpl'
        assert host.customs['_OSTYPE'] == 'gnulinux'
        # Todo: check if it is normal ... host.check_period is the TP uuid and not an object!
        assert host.check_period == tp.uuid

        # A service...
        svc = self._scheduler.services.find_srv_by_name_and_hostname(
            "test_host_0", "test_ok_0")
        assert svc is not None
        assert svc.get_check_command() == "check_service"
        assert svc.customs['_CUSTNAME'] == 'custvalue'
        # Todo: check if it is normal ... host.check_period is the TP uuid and not an object!
        assert svc.check_period == tp.uuid

        # A contact...
        contact = self._scheduler.contacts[host.contacts[0]]
        assert contact is not None
        assert contact.contact_name == "test_contact"
        # Todo: check if it is normal ... contact.host_notification_period is the TP name
        # and not an object!
        assert contact.host_notification_period == tp.timeperiod_name
        assert contact.service_notification_period == tp.timeperiod_name

        # ---
        # External command: change check command
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_SVC_CHECK_COMMAND;test_host_0;test_ok_0;check-host-alive' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert svc.get_check_command() == "check-host-alive"
        # 512 = MODATTR_CHECK_COMMAND
        assert 512 == svc.modified_attributes

        # ---
        # External command: change notification period
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_SVC_NOTIFICATION_TIMEPERIOD;test_host_0;test_ok_0;none' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Todo: now, check period is an object and no more a TP uuid!
        assert svc.notification_period == tp2
        # 65536 = MODATTR_NOTIFICATION_TIMEPERIOD
        assert 65536 == svc.modified_attributes

        # ---
        # External command: change check period
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_SVC_CHECK_TIMEPERIOD;test_host_0;test_ok_0;none' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Todo: now, check period is an object and no more a TP uuid!
        assert svc.check_period == tp2
        assert 16384 == svc.modified_attributes

        # ---
        # External command: change event handler
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_SVC_EVENT_HANDLER;test_host_0;test_ok_0;check-host-alive' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # NOTE(review): this asserts on the check command, not the event
        # handler - looks like a copy/paste; confirm against get_event_handler()
        assert svc.get_check_command() == "check-host-alive"
        assert 256 == svc.modified_attributes

        # ---
        # External command: change snapshot command
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_SVC_SNAPSHOT_COMMAND;test_host_0;test_ok_0;check-host-alive' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert svc.get_snapshot_command() == "check-host-alive"
        assert 256 == svc.modified_attributes

        # ---
        # External command: max service check attempts
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_MAX_SVC_CHECK_ATTEMPTS;test_host_0;test_ok_0;5' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert getattr(
            svc, DICT_MODATTR["MODATTR_MAX_CHECK_ATTEMPTS"].attribute) == 5
        assert 4096 == svc.modified_attributes

        # ---
        # External command: normal service check interval
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_NORMAL_SVC_CHECK_INTERVAL;test_host_0;test_ok_0;21' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert getattr(
            svc, DICT_MODATTR["MODATTR_NORMAL_CHECK_INTERVAL"].attribute) == 21
        assert 1024 == svc.modified_attributes

        # ---
        # External command: retry service check interval
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_RETRY_SVC_CHECK_INTERVAL;test_host_0;test_ok_0;42' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert getattr(
            svc, DICT_MODATTR["MODATTR_RETRY_CHECK_INTERVAL"].attribute) == 42
        assert 2048 == svc.modified_attributes

        # ---
        # External command: change service custom var - undefined variable
        svc.modified_attributes = 0
        # Not existing
        assert '_UNDEFINED' not in svc.customs
        excmd = '[%d] CHANGE_CUSTOM_SVC_VAR;test_host_0;test_ok_0;_UNDEFINED;other' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Still not existing - an unknown variable is not created
        assert '_UNDEFINED' not in svc.customs
        assert 0 == svc.modified_attributes

        # External command: change service custom var
        svc.modified_attributes = 0
        excmd = '[%d] CHANGE_CUSTOM_SVC_VAR;test_host_0;test_ok_0;_CUSTNAME;other' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert svc.customs['_CUSTNAME'] == 'other'
        # 32768 = MODATTR_CUSTOM_VARIABLE
        assert 32768 == svc.modified_attributes

        # ---
        # External command: delay service first notification
        svc.modified_attributes = 0
        assert svc.first_notification_delay == 0
        excmd = '[%d] DELAY_SVC_NOTIFICATION;test_host_0;test_ok_0;10' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert svc.first_notification_delay == 10

    def test_change_contact_attributes(self):
        """ Change contact attributes with external commands
        (notification periods, custom variables) and verify the matching
        modified attributes flags.

        :return: None
        """
        # A TP...
        tp = self._scheduler.timeperiods.find_by_name("24x7")
        assert tp.timeperiod_name == "24x7"
        tp2 = self._scheduler.timeperiods.find_by_name("none")
        assert tp2.timeperiod_name == "none"

        # A contact...
        host = self._scheduler.hosts.find_by_name("test_host_0")
        contact = self._scheduler.contacts[host.contacts[0]]
        assert contact is not None
        assert contact.contact_name == "test_contact"
        # Todo: check if it is normal ... contact.host_notification_period is the TP name
        # and not an object!
        assert contact.host_notification_period == tp.timeperiod_name
        assert contact.service_notification_period == tp.timeperiod_name
        # Issue #487: no customs for contacts ...
        assert contact.customs is not None
        assert contact.customs['_VAR1'] == '10'
        assert contact.customs['_VAR2'] == 'text'

        # ---
        # External command: change contact host notification period
        contact.modified_host_attributes = 0
        excmd = '[%d] CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD;test_contact;none' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Todo: now, TP is an object and no more a TP name!
        assert contact.host_notification_period == tp2
        assert 65536 == contact.modified_host_attributes

        # ---
        # External command: change contact service notification period
        contact.modified_service_attributes = 0
        excmd = '[%d] CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD;test_contact;none' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Todo: now, TP is an object and no more a TP name!
        assert contact.service_notification_period == tp2
        assert 65536 == contact.modified_service_attributes

        # ---
        # External command: change contact custom var - undefined variable
        contact.modified_attributes = 0
        # Not existing
        assert '_UNDEFINED' not in contact.customs
        # NOTE(review): arguments look host/service shaped (test_host_0;test_ok_0)
        # for a CONTACT command - presumably intentional to hit the
        # "unknown variable" path; confirm
        excmd = '[%d] CHANGE_CUSTOM_CONTACT_VAR;test_host_0;test_ok_0;_UNDEFINED;other' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # Still not existing - an unknown variable is not created
        assert '_UNDEFINED' not in contact.customs
        assert 0 == contact.modified_attributes

        # External command: change contact custom var
        # Issue #487: no customs for contacts ...
        contact.modified_attributes = 0
        excmd = '[%d] CHANGE_CUSTOM_CONTACT_VAR;test_contact;_VAR1;20' % time.time()
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        # The custom variable got updated (values are stored as strings)
        assert contact.customs['_VAR1'] == '20'
        # 32768 = MODATTR_CUSTOM_VARIABLE
        assert 32768 == contact.modified_attributes

    @freeze_time("2017-06-01 18:30:00")
    def test_host_comments(self):
        """ Test the comments for hosts: add several comments (including one
        with accented characters), delete one, delete all, and check the
        monitoring events log.

        :return: None
        """
        # An host...
        host = self._scheduler.hosts.find_by_name("test_host_0")
        assert host.customs is not None
        assert host.get_check_command() == "check-host-alive-parent"
        # assert host.get_check_command() == \
        #     "check-host-alive-parent!up!$HOSTSTATE:test_router_0$"
        assert host.customs['_OSLICENSE'] == 'gpl'
        assert host.customs['_OSTYPE'] == 'gnulinux'
        assert host.comments == {}

        now = int(time.time())

        # ---
        # External command: add an host comment
        assert host.comments == {}
        excmd = '[%d] ADD_HOST_COMMENT;test_host_0;1;test_contact;My comment' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(host.comments) == 1
        comment = list(host.comments.values())[0]
        assert comment.comment == "My comment"
        assert comment.author == "test_contact"

        # ---
        # External command: add another host comment
        excmd = '[%d] ADD_HOST_COMMENT;test_host_0;1;test_contact;My comment 2' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(host.comments) == 2

        # ---
        # External command: yet another host comment (with accented characters)
        excmd = '[%d] ADD_HOST_COMMENT;test_host_0;1;test_contact;' \
                'My accented é"{|:âàç comment' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(host.comments) == 3

        # ---
        # External command: delete an host comment (unknown comment)
        excmd = '[%d] DEL_HOST_COMMENT;unknown_id' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        self.scheduler_loop(1, [])
        # Nothing got deleted
        assert len(host.comments) == 3

        # ---
        # External command: delete an host comment
        c_id = list(host.comments)[0]
        excmd = '[%d] DEL_HOST_COMMENT;%s' % (now, c_id)
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        self.scheduler_loop(1, [])
        assert len(host.comments) == 2

        # ---
        # External command: delete all host comment
        excmd = '[%d] DEL_ALL_HOST_COMMENTS;test_host_0' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(host.comments) == 0

        # The monitoring events log must contain the commands and their effects
        expected_logs = [
            ('info',
             u'EXTERNAL COMMAND: [%s] ADD_HOST_COMMENT;test_host_0;1;test_contact;My comment' % now),
            ('info',
             'HOST COMMENT: test_host_0;test_contact;My comment'),
            ('info',
             u'EXTERNAL COMMAND: [%s] ADD_HOST_COMMENT;test_host_0;1;test_contact;My comment 2' % now),
            ('info',
             'HOST COMMENT: test_host_0;test_contact;My comment 2'),
            ('info',
             u'EXTERNAL COMMAND: [%s] ADD_HOST_COMMENT;test_host_0;1;test_contact;My accented é"{|:âàç comment' % now),
            ('info',
             'HOST COMMENT: test_host_0;test_contact;My accented é"{|:âàç comment'),
            ('info',
             u'EXTERNAL COMMAND: [%s] DEL_HOST_COMMENT;unknown_id' % now),
            ('warning',
             u'DEL_HOST_COMMENT: comment id: unknown_id does not exist and cannot be deleted.'),
            ('info',
             'EXTERNAL COMMAND: [%s] DEL_HOST_COMMENT;%s' % (now, c_id)),
            ('info',
             'EXTERNAL COMMAND: [%s] DEL_ALL_HOST_COMMENTS;test_host_0' % now)
        ]
        self.check_monitoring_events_log(expected_logs)

    @freeze_time("2017-06-01 18:30:00")
    def test_service_comments(self):
        """ Test the comments for services: add several comments, delete one,
        delete all, and check the monitoring events log.

        :return: None
        """
        # A service...
        svc = self._scheduler.services.find_srv_by_name_and_hostname(
            "test_host_0", "test_ok_0")
        assert svc.customs is not None
        assert svc.get_check_command() == "check_service"
        # assert svc.get_check_command() == "check_service!ok"
        assert svc.customs['_CUSTNAME'] == 'custvalue'
        assert svc.comments == {}

        now = int(time.time())

        # ---
        # External command: add a service comment
        assert svc.comments == {}
        excmd = '[%d] ADD_SVC_COMMENT;test_host_0;test_ok_0;1;test_contact;My comment' \
                % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(svc.comments) == 1
        comment = list(svc.comments.values())[0]
        assert comment.comment == "My comment"
        assert comment.author == "test_contact"

        # ---
        # External command: add another service comment
        excmd = '[%d] ADD_SVC_COMMENT;test_host_0;test_ok_0;1;test_contact;My comment 2' \
                % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(svc.comments) == 2

        # ---
        # External command: yet another service comment (accented characters)
        excmd = '[%d] ADD_SVC_COMMENT;test_host_0;test_ok_0;1;test_contact;My accented ' \
                'é"{|:âàç comment' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(svc.comments) == 3

        # ---
        # External command: delete a service comment (unknown comment)
        excmd = '[%d] DEL_SVC_COMMENT;unknown_id' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        self.scheduler_loop(1, [])
        # Nothing got deleted
        assert len(svc.comments) == 3

        # ---
        # External command: delete a service comment
        c_id = list(svc.comments)[0]
        excmd = '[%d] DEL_SVC_COMMENT;%s' % (now, c_id)
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        self.scheduler_loop(1, [])
        assert len(svc.comments) == 2

        # ---
        # External command: delete all service comment
        excmd = '[%d] DEL_ALL_SVC_COMMENTS;test_host_0;test_ok_0' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert len(svc.comments) == 0

        # The monitoring events log must contain the commands and their effects
        expected_logs = [
            ('info',
             u'EXTERNAL COMMAND: [%s] ADD_SVC_COMMENT;'
             u'test_host_0;test_ok_0;1;test_contact;My comment' % now),
            ('info',
             'SERVICE COMMENT: test_host_0;test_ok_0;test_contact;My comment'),
            ('info',
             u'EXTERNAL COMMAND: [%s] ADD_SVC_COMMENT;'
             u'test_host_0;test_ok_0;1;test_contact;My comment 2' % now),
            ('info',
             'SERVICE COMMENT: test_host_0;test_ok_0;test_contact;My comment 2'),
            ('info',
             u'EXTERNAL COMMAND: [%s] ADD_SVC_COMMENT;'
             u'test_host_0;test_ok_0;1;test_contact;My accented é"{|:âàç comment' % now),
            ('info',
             'SERVICE COMMENT: test_host_0;test_ok_0;test_contact;My accented é"{|:âàç comment'),
            ('info',
             u'EXTERNAL COMMAND: [%s] DEL_SVC_COMMENT;unknown_id' % now),
            ('warning',
             u'DEL_SVC_COMMENT: comment id: unknown_id does not exist and cannot be deleted.'),
            ('info',
             'EXTERNAL COMMAND: [%s] DEL_SVC_COMMENT;%s' % (now, c_id)),
            ('info',
             u'EXTERNAL COMMAND: [%s] DEL_ALL_SVC_COMMENTS;test_host_0;test_ok_0' % now),
        ]
        self.check_monitoring_events_log(expected_logs)

    @freeze_time("2017-06-01 18:30:00")
    def test_host_acknowledges(self):
        """ Test the acknowledges for hosts: acknowledge a DOWN host, then
        remove the acknowledgement, and check the monitoring events log.

        :return: None
        """
        # Get host
        host = self._scheduler.hosts.find_by_name('test_host_0')
        host.checks_in_progress = []
        host.event_handler_enabled = False
        host.active_checks_enabled = True
        host.passive_checks_enabled = True
        print("Host: %s - state: %s/%s" % (host, host.state_type, host.state))
        assert host is not None

        # Get dependent host
        router = self._scheduler.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.event_handler_enabled = False
        router.active_checks_enabled = True
        router.passive_checks_enabled = True
        print("Router: %s - state: %s/%s" % (router, router.state_type, router.state))
        assert router is not None

        now = int(time.time())

        # Passive checks for hosts - special case
        # ---------------------------------------------
        # Host is DOWN
        excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Host is DOWN' % int(time.time())
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        self.show_checks()
        self.assert_checks_count(2)
        # Host check and service may happen in any order... because launched almost simultaneously!
        self.assert_any_check_match('test_hostcheck.pl', 'command')
        self.assert_any_check_match('hostname test_host_0', 'command')
        self.assert_any_check_match('test_servicecheck.pl', 'command')
        self.assert_any_check_match('hostname test_host_0', 'command')
        self.assert_any_check_match('servicedesc test_ok_0', 'command')
        assert 'DOWN' == router.state
        assert 'Host is DOWN' == router.output
        assert False == router.problem_has_been_acknowledged

        # Acknowledge router
        excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        print("Host state", router.state, router.problem_has_been_acknowledged)
        assert 'DOWN' == router.state
        assert True == router.problem_has_been_acknowledged

        # Remove acknowledge router
        excmd = '[%d] REMOVE_HOST_ACKNOWLEDGEMENT;test_router_0' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        print("Host state", router.state, router.problem_has_been_acknowledged)
        assert 'DOWN' == router.state
        # Acknowledge is gone
        assert False == router.problem_has_been_acknowledged

        # The monitoring events log must contain the acknowledge life-cycle
        # (timestamps are fixed by the class-level freeze_time)
        expected_logs = [
            ('warning',
             u'PASSIVE HOST CHECK: test_router_0;2;Host is DOWN;;'),
            ('error',
             u'HOST ALERT: test_router_0;DOWN;SOFT;1;Host is DOWN'),
            ('info',
             u'HOST ACKNOWLEDGE ALERT: test_router_0;STARTED; Host problem has been acknowledged'),
            ('info',
             u'EXTERNAL COMMAND: [1496341800] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test'),
            ('info',
             u'HOST NOTIFICATION: test_contact;test_router_0;ACKNOWLEDGEMENT (DOWN);0;notify-host;Host is DOWN'),
            ('info',
             u'EXTERNAL COMMAND: [1496341800] REMOVE_HOST_ACKNOWLEDGEMENT;test_router_0'),
            ('info',
             u'HOST ACKNOWLEDGE ALERT: test_router_0;EXPIRED; Host problem acknowledge expired')
        ]
        self.check_monitoring_events_log(expected_logs)

    @freeze_time("2017-06-01 18:30:00")
    def test_service_acknowledges(self):
        """ Test the acknowledges for services: acknowledge a WARNING service,
        then remove the acknowledgement, and check the monitoring events log.

        :return: None
        """
        self._main_broker.broks = []

        # Get host
        host = self._scheduler.hosts.find_by_name('test_host_0')
        host.checks_in_progress = []
        host.event_handler_enabled = False
        host.active_checks_enabled = True
        host.passive_checks_enabled = True
        print("Host: %s - state: %s/%s" % (host, host.state_type, host.state))
        assert host is not None

        # Get dependent host
        router = self._scheduler.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.event_handler_enabled = False
        router.active_checks_enabled = True
        router.passive_checks_enabled = True
        print("Router: %s - state: %s/%s" % (router, router.state_type, router.state))
        assert router is not None

        # Get service
        svc = self._scheduler.services.find_srv_by_name_and_hostname(
            "test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.event_handler_enabled = False
        svc.active_checks_enabled = True
        svc.passive_checks_enabled = True
        assert svc is not None
        print("Service: %s - state: %s/%s" % (svc, svc.state_type, svc.state))

        now = int(time.time())

        # Passive checks for services
        # ---------------------------------------------
        # Receive passive service check Warning
        excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;' \
                'test_host_0;test_ok_0;1;Service is WARNING' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        self.scheduler_loop(1, [[host, 0, 'Host is UP']])
        assert 'WARNING' == svc.state
        assert 'Service is WARNING' == svc.output
        assert False == svc.problem_has_been_acknowledged

        # Acknowledge service
        excmd = '[%d] ACKNOWLEDGE_SVC_PROBLEM;' \
                'test_host_0;test_ok_0;2;1;1;Big brother;Acknowledge service' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert 'WARNING' == svc.state
        assert True == svc.problem_has_been_acknowledged

        # Remove acknowledge service
        excmd = '[%d] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;test_ok_0' % now
        self._scheduler.run_external_commands([excmd])
        self.external_command_loop()
        assert 'WARNING' == svc.state
        # Acknowledge is gone
        assert False == svc.problem_has_been_acknowledged

        # The monitoring events log must contain the acknowledge life-cycle
        expected_logs = [
            # ('info',
            #  'RETENTION LOAD: scheduler-master'),
            ('warning',
             'PASSIVE SERVICE CHECK: test_host_0;test_ok_0;1;Service is WARNING;;'),
            ('info',
             'ACTIVE HOST CHECK: test_host_0;UP;0;Host is UP'),
            ('warning',
             'SERVICE ALERT: test_host_0;test_ok_0;WARNING;SOFT;1;Service is WARNING'),
            ('info',
             'EXTERNAL COMMAND: [%s] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;Acknowledge service' % now),
            ('info',
             'SERVICE ACKNOWLEDGE ALERT: test_host_0;test_ok_0;STARTED; Service problem has been acknowledged'),
            ('info',
             'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;ACKNOWLEDGEMENT (WARNING);'
             '0;notify-service;Service is WARNING'),
            ('info',
             'EXTERNAL COMMAND: [%s] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;test_ok_0' % now),
            ('info',
             'SERVICE ACKNOWLEDGE ALERT: test_host_0;test_ok_0;EXPIRED; Service problem acknowledge expired')
        ]
        self.check_monitoring_events_log(expected_logs)

    @freeze_time("2017-06-01 18:30:00")
    def test_host_downtimes_host_up(self):
        """ Test the downtime for hosts - host is UP: schedule a fixed
        downtime, let it start and expire, and check notifications and the
        monitoring events log.

        :return: None
        """
        # An host...
        host = self._scheduler.hosts.find_by_name("test_host_0")
        host.act_depend_of = []  # ignore the host which we depend of
        host.checks_in_progress = []
        host.event_handler_enabled = False
        assert host.downtimes == {}

        # Its service
        svc = self._scheduler.services.find_srv_by_name_and_hostname(
            "test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = []  # ignore the host which we depend of
        svc.event_handler_enabled = False

        # Freeze the time !
        initial_datetime = datetime.datetime(year=2017, month=6, day=1,
                                             hour=18, minute=30, second=0)
        with freeze_time(initial_datetime) as frozen_datetime:
            assert frozen_datetime() == initial_datetime
            now = int(time.time())

            # ---------------------------------------------
            # Receive passive host check Host is up and alive
            excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is alive' % now
            self._scheduler.run_external_commands([excmd])
            self.external_command_loop()
            assert 'UP' == host.state
            assert 'HARD' == host.state_type
            assert 'Host is alive' == host.output

            # ---
            # External command: add an host downtime
            assert host.downtimes == {}
            # Host is not currently a problem
            assert False == host.is_problem
            assert False == host.problem_has_been_acknowledged
            # Host service is not currently a problem
            assert False == svc.is_problem
            assert False == svc.problem_has_been_acknowledged
            # Fixed downtime of 2 seconds starting immediately
            excmd = '[%d] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;' \
                    'test_contact;My first downtime' % (now, now, now + 2)
            self._scheduler.run_external_commands([excmd])
            self.external_command_loop()
            # Host is still not a problem - the downtime do not change anything to this
            # because no acknowledge has been set in this case
            assert False == host.is_problem
            assert False == host.problem_has_been_acknowledged
            # Host service is neither impacted
            assert False == svc.is_problem
            assert False == svc.problem_has_been_acknowledged

            # The downtime is stored with the expected properties
            assert len(host.downtimes) == 1
            downtime = list(host.downtimes.values())[0]
            assert downtime.comment == "My first downtime"
            assert downtime.author == "test_contact"
            assert downtime.start_time == now
            assert downtime.end_time == now + 2
            assert downtime.duration == 2
            assert downtime.fixed == True
            assert downtime.trigger_id == "0"

            # Time warp 1 second
            frozen_datetime.tick()

            self.external_command_loop()
            # Notification: downtime start only...
self.assert_actions_count(1) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # Time warp 2 seconds frozen_datetime.tick() frozen_datetime.tick() self.external_command_loop() # Notification: downtime start and end self.show_actions() self.assert_actions_count(2) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # The downtime stopped self.assert_actions_match(1, '/notifier.pl', 'command') self.assert_actions_match(1, 'DOWNTIMEEND', 'type') self.assert_actions_match(1, 'scheduled', 'status') # Clear actions self.clear_actions() self.show_actions() time.sleep(1) expected_logs = [ # Host UP ('info', u'PASSIVE HOST CHECK: test_host_0;0;Host is alive;;'), # First downtime ('info', u'EXTERNAL COMMAND: [1496341800] SCHEDULE_HOST_DOWNTIME;test_host_0;1496341800;' u'1496341802;1;0;1200;test_contact;My first downtime'), ('info', u'HOST DOWNTIME ALERT: test_host_0;STARTED; Host has entered a period ' u'of scheduled downtime'), ('info', u'HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMESTART (UP);0;notify-host;' u'Host is alive'), ('info', u'HOST DOWNTIME ALERT: test_host_0;STOPPED; Host has exited from a period ' u'of scheduled downtime'), ('info', u'HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMEEND (UP);0;notify-host;' u'Host is alive') ] self.check_monitoring_events_log(expected_logs) def test_host_downtimes_host_down(self): """ Test the downtime for hosts - host is DOWN :return: None """ #  An host... 
host = self._scheduler.hosts.find_by_name("test_host_0") host.act_depend_of = [] # ignore the host which we depend of host.checks_in_progress = [] host.event_handler_enabled = False assert host.downtimes == {} # Its service svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # ignore the host which we depend of svc.event_handler_enabled = False # Freeze the time ! initial_datetime = datetime.datetime(year=2017, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime now = int(time.time()) # Passive checks for hosts # --------------------------------------------- # Receive passive host check Down excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == host.state assert 'SOFT' == host.state_type assert 'Host is dead' == host.output excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == host.state assert 'SOFT' == host.state_type assert 'Host is dead' == host.output excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) assert 'DOWN' == host.state assert 'HARD' == host.state_type assert 'Host is dead' == host.output # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() # Host problem only... 
self.show_actions() self.assert_actions_count(2) # The host problem is notified self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 ' '--notificationtype PROBLEM --hoststate DOWN ' '--hostoutput Host is dead ', 'command') self.assert_actions_match(0, 'NOTIFICATIONTYPE=PROBLEM, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=n/a, ' 'NOTIFICATIONAUTHORNAME=n/a, ' 'NOTIFICATIONAUTHORALIAS=n/a, ' 'NOTIFICATIONCOMMENT=n/a, ' 'HOSTNOTIFICATIONNUMBER=1, ' 'SERVICENOTIFICATIONNUMBER=1', 'command') self.assert_actions_match(1, 'VOID', 'command') #  --- # The host is now a problem... assert True == host.is_problem # and the problem is not yet acknowledged assert False == host.problem_has_been_acknowledged # Simulate that the host service is also a problem svc.is_problem = True svc.problem_has_been_acknowledged = False svc.state_id = 2 svc.state = 'CRITICAL' # External command: add an host downtime excmd = '[%d] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;' \ 'test_contact;My first downtime' % (now, now + 2, now + 10) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(host.downtimes) == 1 downtime = list(host.downtimes.values())[0] assert downtime.comment == "My first downtime" assert downtime.author == "test_contact" assert downtime.start_time == now + 2 assert downtime.end_time == now + 10 assert downtime.duration == 8 assert downtime.fixed == True assert downtime.trigger_id == "0" # Time warp 1 second frozen_datetime.tick() self.external_command_loop() # Host problem only... 
self.show_actions() self.assert_actions_count(3) # The host problem is notified self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 ' '--notificationtype PROBLEM --hoststate DOWN ' '--hostoutput Host is dead ', 'command') self.assert_actions_match(0, 'NOTIFICATIONTYPE=PROBLEM, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=n/a, ' 'NOTIFICATIONAUTHORNAME=n/a, ' 'NOTIFICATIONAUTHORALIAS=n/a, ' 'NOTIFICATIONCOMMENT=n/a, ' 'HOSTNOTIFICATIONNUMBER=1, ' 'SERVICENOTIFICATIONNUMBER=1', 'command') # And the downtime self.assert_actions_match(1, 'notifier.pl --hostname test_host_0 ' '--notificationtype DOWNTIMESTART --hoststate DOWN ' '--hostoutput Host is dead ', 'command') self.assert_actions_match(1, 'NOTIFICATIONTYPE=DOWNTIMESTART, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=test_contact, ' 'NOTIFICATIONAUTHORNAME=Not available, ' 'NOTIFICATIONAUTHORALIAS=Not available, ' 'NOTIFICATIONCOMMENT=My first downtime, ' 'HOSTNOTIFICATIONNUMBER=1, ' 'SERVICENOTIFICATIONNUMBER=1', 'command') self.assert_actions_match(2, 'VOID', 'command') # Let the downtime start... # Time warp 10 seconds frozen_datetime.tick(delta=datetime.timedelta(seconds=10)) self.external_command_loop() # Notification: downtime start and end self.show_actions() # Host problem and acknowledgement only... 
self.assert_actions_count(4) # The host problem is notified self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 ' '--notificationtype PROBLEM --hoststate DOWN ' '--hostoutput Host is dead ', 'command') self.assert_actions_match(0, 'NOTIFICATIONTYPE=PROBLEM, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=n/a, ' 'NOTIFICATIONAUTHORNAME=n/a, ' 'NOTIFICATIONAUTHORALIAS=n/a, ' 'NOTIFICATIONCOMMENT=n/a, ' 'HOSTNOTIFICATIONNUMBER=1, ' 'SERVICENOTIFICATIONNUMBER=1', 'command') # And the downtime self.assert_actions_match(1, 'notifier.pl --hostname test_host_0 ' '--notificationtype DOWNTIMESTART --hoststate DOWN ' '--hostoutput Host is dead ', 'command') self.assert_actions_match(1, 'NOTIFICATIONTYPE=DOWNTIMESTART, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=test_contact, ' 'NOTIFICATIONAUTHORNAME=Not available, ' 'NOTIFICATIONAUTHORALIAS=Not available, ' 'NOTIFICATIONCOMMENT=My first downtime, ' 'HOSTNOTIFICATIONNUMBER=1, ' 'SERVICENOTIFICATIONNUMBER=1', 'command') # And the downtime end self.assert_actions_match(2, 'notifier.pl --hostname test_host_0 ' '--notificationtype DOWNTIMEEND --hoststate DOWN ' '--hostoutput Host is dead ', 'command') self.assert_actions_match(2, 'NOTIFICATIONTYPE=DOWNTIMEEND, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=test_contact, ' 'NOTIFICATIONAUTHORNAME=Not available, ' 'NOTIFICATIONAUTHORALIAS=Not available, ' 'NOTIFICATIONCOMMENT=My first downtime, ' 'HOSTNOTIFICATIONNUMBER=1, ' 'SERVICENOTIFICATIONNUMBER=1', 'command') self.assert_actions_match(3, 'VOID', 'command') # Clear actions self.clear_actions() self.show_actions() # Time warp 1 second frozen_datetime.tick() expected_logs = [ ('warning', u'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'), ('warning', u'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'), ('warning', u'PASSIVE HOST CHECK: test_host_0;2;Host is 
dead;;'), ('error', u'HOST ALERT: test_host_0;DOWN;SOFT;1;Host is dead'), ('error', u'HOST ALERT: test_host_0;DOWN;SOFT;2;Host is dead'), ('error', u'HOST ALERT: test_host_0;DOWN;HARD;3;Host is dead'), ('error', u'HOST NOTIFICATION: test_contact;test_host_0;DOWN;1;notify-host;Host is dead'), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;' u'1200;test_contact;My first downtime' % (now, now + 2, now + 10)), ('info', u'SERVICE ACKNOWLEDGE ALERT: test_host_0;test_ok_0;STARTED; Service problem has been acknowledged'), ('info', u'HOST ACKNOWLEDGE ALERT: test_host_0;STARTED; Host problem has been acknowledged'), ('info', u'HOST DOWNTIME ALERT: test_host_0;STARTED; Host has entered a period of scheduled downtime'), ('info', u'HOST DOWNTIME ALERT: test_host_0;STOPPED; Host has exited from a period of scheduled downtime'), ('info', u'HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMEEND (DOWN);1;notify-host;Host is dead'), ('info', u'HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMESTART (DOWN);1;notify-host;Host is dead'), ] self.check_monitoring_events_log(expected_logs) def test_host_downtimes_host_delete(self): """ Test the downtime for hosts - host is DOWN - create and delete downtimes :return: None """ #  An host... host = self._scheduler.hosts.find_by_name("test_host_0") host.act_depend_of = [] # ignore the host which we depend of host.checks_in_progress = [] host.event_handler_enabled = False host.notifications_interval = 720 assert host.downtimes == {} # Its service svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # ignore the host which we depend of svc.event_handler_enabled = False svc.notifications_interval = 720 # Clean broks to delete scheduler retention load message self._main_broker.broks = [] # Freeze the time ! 
initial_datetime = datetime.datetime(year=2017, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # Passive checks for hosts # --------------------------------------------- # Receive passive host check Up excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is alive' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host.state assert 'HARD' == host.state_type assert 'Host is alive' == host.output # Time warp 1 minute frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Receive passive host check Down excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == host.state assert 'SOFT' == host.state_type assert 'Host is dead' == host.output # Time warp 1 minute frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == host.state assert 'SOFT' == host.state_type assert 'Host is dead' == host.output # Time warp 1 minute frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is dead' % int( time.time()) self._scheduler.run_external_commands([excmd]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() self.external_command_loop() assert 'DOWN' == host.state assert 'HARD' == host.state_type assert 'Host is dead' == host.output # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) #  --- # External command: add an host downtime # Simulate that the host is now a problem but the downtime starts in some seconds host.is_problem = True host.problem_has_been_acknowledged = False # Host service is now a problem svc.is_problem = True svc.problem_has_been_acknowledged = False svc.state_id = 2 svc.state = 'CRITICAL' # and the problem is not acknowledged assert False == host.problem_has_been_acknowledged # Schedule a downtime for the host - 15 minutes downtime now = int(time.time()) excmd = '[%d] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;' \ 'test_contact;My first downtime' % ( now, now+2, now + 15*60) self._scheduler.run_external_commands([excmd]) # Time warp 1 second - the downtime is not yet started frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() # Host is a problem - assert True == host.is_problem assert False == host.problem_has_been_acknowledged # Host service is neither impacted assert True == svc.is_problem assert False == svc.problem_has_been_acknowledged assert len(host.downtimes) == 1 downtime = list(host.downtimes.values())[0] assert downtime.comment == "My first downtime" assert downtime.author == "test_contact" assert downtime.start_time == now + 2 assert downtime.end_time == now + 15*60 assert downtime.duration == 15*60 - 2 assert downtime.fixed == True assert downtime.trigger_id == "0" frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Time warp 1 minute- the downtime is started now! 
self.external_command_loop() #  --- # External command: yet another host downtime excmd = '[%d] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;test_contact;' \ u'My accented é"{|:âàç downtime' % ( now + 10, now + 180, now + 360) self._scheduler.run_external_commands([excmd]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() # Time warp 1 second - the new downtime is now started frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() assert len(host.downtimes) == 2 #  --- # External command: delete an host downtime (unknown downtime) excmd = '[%d] DEL_HOST_DOWNTIME;unknown_id' % (now + 100) self._scheduler.run_external_commands([excmd]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() # Still remaining 2 downtimes assert len(host.downtimes) == 2 #  --- # External command: delete an host downtime downtime = list(host.downtimes.values())[0] excmd = '[%d] DEL_HOST_DOWNTIME;%s' % (now + 200, downtime.uuid) self._scheduler.run_external_commands([excmd]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() assert len(host.downtimes) == 1 #  --- # External command: delete all host downtime excmd = '[%d] DEL_ALL_HOST_DOWNTIMES;test_host_0' % (now + 300) self._scheduler.run_external_commands([excmd]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop() assert len(host.downtimes) == 0 # We got 'monitoring_log' broks for logging to the monitoring logs... 
# monitoring_logs = [] # for brok in self._main_broker.broks: # if brok.type == 'monitoring_log': # data = unserialize(brok.data) # monitoring_logs.append((data['level'], data['message']) # # print(monitoring_logs) # el = [ # ('info', 'EXTERNAL COMMAND: [1496342582] DEL_HOST_DOWNTIME;unknown_id'), ('warning', 'DEL_HOST_DOWNTIME: downtime_id id: unknown_id does not exist and cannot be deleted.'), ('info', 'EXTERNAL COMMAND: [1496342582] DEL_HOST_DOWNTIME;02dc73b6-6cb2-4c56-84ec-f98f3a8ebe59'), ('info', 'HOST DOWNTIME ALERT: test_host_0;CANCELLED; Scheduled downtime for host has been cancelled.'), ('info', 'EXTERNAL COMMAND: [1496342582] DEL_ALL_HOST_DOWNTIMES;test_host_0') # ] expected_logs = [ ('info', u'PASSIVE HOST CHECK: test_host_0;0;Host is alive;;'), ('warning', u'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'), ('error', u'HOST ALERT: test_host_0;DOWN;SOFT;1;Host is dead'), ('warning', u'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'), ('error', u'HOST ALERT: test_host_0;DOWN;SOFT;2;Host is dead'), ('warning', u'PASSIVE HOST CHECK: test_host_0;2;Host is dead;;'), ('error', u'HOST ALERT: test_host_0;DOWN;HARD;3;Host is dead'), ('error', u'HOST NOTIFICATION: test_contact;test_host_0;DOWN;1;notify-host;Host is dead'), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;' u'test_contact;My first downtime' % (now, now + 2, now + 15*60)), ('error', u'HOST NOTIFICATION: test_contact;test_host_0;DOWN;2;notify-host;Host is dead'), ('info', u'HOST DOWNTIME ALERT: test_host_0;STARTED; Host has entered a period of scheduled downtime'), ('info', u'HOST ACKNOWLEDGE ALERT: test_host_0;STARTED; Host problem has been acknowledged'), ('info', u'HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMESTART (DOWN);2;notify-host;Host is dead'), ('info', u'SERVICE ACKNOWLEDGE ALERT: test_host_0;test_ok_0;STARTED; Service problem has been acknowledged'), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;' 
u'test_contact;My accented é"{|:âàç downtime' % (now + 10, now + 180, now + 360)), ('info', u'EXTERNAL COMMAND: [%s] DEL_HOST_DOWNTIME;unknown_id' % (now + 100)), ('warning', u'DEL_HOST_DOWNTIME: downtime id: unknown_id ' u'does not exist and cannot be deleted.'), ('info', u'EXTERNAL COMMAND: [%s] DEL_HOST_DOWNTIME;%s' % (now + 200, downtime.uuid)), ('info', u'HOST DOWNTIME ALERT: test_host_0;CANCELLED; Scheduled downtime for host has been cancelled.'), ('info', u'EXTERNAL COMMAND: [%s] DEL_ALL_HOST_DOWNTIMES;test_host_0' % (now + 300)), ] self.check_monitoring_events_log(expected_logs) def test_service_downtimes(self): """ Test the downtimes for services :return: None """ #  A service... svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") assert svc.customs is not None assert svc.get_check_command() == "check_service" # assert svc.get_check_command() == "check_service!ok" assert svc.customs['_CUSTNAME'] == 'custvalue' assert svc.comments == {} now = int(time.time()) #  --- # External command: add a service downtime assert svc.downtimes == {} excmd = '[%d] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%s;%s;1;0;1200;' \ 'test_contact;My downtime' % (now, now + 120, now + 1200) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(svc.downtimes) == 1 downtime_id = list(svc.downtimes)[0] downtime = list(svc.downtimes.values())[0] assert downtime.comment == "My downtime" assert downtime.author == "test_contact" assert downtime.start_time == now + 120 assert downtime.end_time == now + 1200 assert downtime.duration == 1080 assert downtime.fixed == True assert downtime.trigger_id == "0" #  --- # External command: add another service downtime excmd = '[%d] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%s;%s;1;0;1200;' \ 'test_contact;My downtime 2' % (now, now + 1120, now + 11200) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(svc.downtimes) == 2 #  --- # External 
command: yet another service downtime excmd = '[%d] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%s;%s;1;0;1200;test_contact;' \ 'My accented é"{|:âàç downtime' % ( now, now + 2120, now + 21200) self._scheduler.run_external_commands([excmd]) self.external_command_loop() expected_logs = [ ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' u'%s;%s;1;0;1200;test_contact;My downtime' % (now, now + 120, now + 1200)), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' u'%s;%s;1;0;1200;test_contact;My downtime 2' % (now, now + 1120, now + 11200)), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' u'%s;%s;1;0;1200;test_contact;My accented é"{|:âàç downtime' % ( now, now + 2120, now + 21200)), ] self.check_monitoring_events_log(expected_logs) assert len(svc.downtimes) == 3 #  --- # External command: delete a service downtime (unknown downtime) excmd = '[%d] DEL_SVC_DOWNTIME;unknown_id' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, []) assert len(svc.downtimes) == 3 #  --- # External command: delete a service downtime excmd = '[%d] DEL_SVC_DOWNTIME;%s' % (now, downtime_id) self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, []) assert len(svc.downtimes) == 2 #  --- # External command: delete all service downtime excmd = '[%d] DEL_ALL_SVC_DOWNTIMES;test_host_0;test_ok_0' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(svc.downtimes) == 0 expected_logs = [ ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' u'%s;%s;1;0;1200;test_contact;My downtime' % (now, now + 120, now + 1200)), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' u'%s;%s;1;0;1200;test_contact;My downtime 2' % (now, now + 1120, now + 11200)), ('info', u'EXTERNAL COMMAND: [%s] 
SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' u'%s;%s;1;0;1200;test_contact;My accented é"{|:âàç downtime' % ( now, now + 2120, now + 21200)), ('info', u'EXTERNAL COMMAND: [%s] DEL_SVC_DOWNTIME;unknown_id' % now), ('warning', u'DEL_SVC_DOWNTIME: downtime id: unknown_id does ' u'not exist and cannot be deleted.'), ('info', u'EXTERNAL COMMAND: [%s] DEL_SVC_DOWNTIME;%s' % (now, downtime_id)), ('info', u'EXTERNAL COMMAND: [%s] DEL_ALL_SVC_DOWNTIMES;test_host_0;test_ok_0' % now), ] self.check_monitoring_events_log(expected_logs) def test_contact_downtimes(self): """ Test the downtime for hosts :return: None """ self.clear_logs() # An host and a contact... host = self._scheduler.hosts.find_by_name("test_host_0") contact = self._scheduler.contacts[host.contacts[0]] assert contact is not None assert contact.contact_name == "test_contact" now = int(time.time()) #  --- # External command: add a contact downtime assert host.downtimes == {} now = int(time.time()) excmd = '[%d] SCHEDULE_CONTACT_DOWNTIME;test_contact;%s;%s;test_contact;My downtime' \ % (now, now + 120, now + 1200) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(contact.downtimes) == 1 downtime_id = list(contact.downtimes)[0] downtime = contact.downtimes[downtime_id] assert downtime.comment == "My downtime" assert downtime.author == "test_contact" assert downtime.start_time == now + 120 assert downtime.end_time == now + 1200 #  --- # External command: add another contact downtime excmd = '[%d] SCHEDULE_CONTACT_DOWNTIME;test_contact;%s;%s;test_contact;My downtime 2' \ % (now, now + 1120, now + 11200) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(contact.downtimes) == 2 #  --- # External command: yet another contact downtime excmd = '[%d] SCHEDULE_CONTACT_DOWNTIME;test_contact;%s;%s;test_contact;' \ 'My accented é"{|:âàç downtime' % ( now, now + 2120, now + 21200) self._scheduler.run_external_commands([excmd]) 
self.external_command_loop() expected_logs = [ ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_CONTACT_DOWNTIME;test_contact;' u'%s;%s;test_contact;My downtime' % (now, now + 120, now + 1200)), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_CONTACT_DOWNTIME;test_contact;' u'%s;%s;test_contact;My downtime 2' % (now, now + 1120, now + 11200)), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_CONTACT_DOWNTIME;test_contact;' u'%s;%s;test_contact;My accented é"{|:âàç downtime' % (now, now + 2120, now + 21200)), ] self.show_logs() self.check_monitoring_events_log(expected_logs, dump=True) assert len(contact.downtimes) == 3 #  --- # External command: delete a contact downtime (unknown downtime) excmd = '[%d] DEL_CONTACT_DOWNTIME;unknown_id' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, []) assert len(contact.downtimes) == 3 #  --- # External command: delete an host downtime excmd = '[%d] DEL_CONTACT_DOWNTIME;%s' % (now, downtime_id) self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, []) assert len(contact.downtimes) == 2 #  --- # External command: delete all host downtime excmd = '[%d] DEL_ALL_CONTACT_DOWNTIMES;test_contact' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(contact.downtimes) == 0 expected_logs = [ ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_CONTACT_DOWNTIME;test_contact;' u'%s;%s;test_contact;My downtime' % (now, now + 120, now + 1200)), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_CONTACT_DOWNTIME;test_contact;' u'%s;%s;test_contact;My downtime 2' % (now, now + 1120, now + 11200)), ('info', u'EXTERNAL COMMAND: [%s] SCHEDULE_CONTACT_DOWNTIME;test_contact;' u'%s;%s;test_contact;My accented é"{|:âàç downtime' % (now, now + 2120, now + 21200)), ('info', u'EXTERNAL COMMAND: [%s] DEL_CONTACT_DOWNTIME;unknown_id' % now), ('warning', u'DEL_CONTACT_DOWNTIME: downtime id: unknown_id does ' u'not exist and cannot be deleted.'), 
('info', u'EXTERNAL COMMAND: [%s] DEL_CONTACT_DOWNTIME;%s' % (now, downtime_id)), ('info', 'CONTACT DOWNTIME ALERT: test_contact;CANCELLED; Scheduled downtime ' 'for contact has been cancelled.'), ('info', u'EXTERNAL COMMAND: [%s] DEL_ALL_CONTACT_DOWNTIMES;test_contact' % now), ('info', 'CONTACT DOWNTIME ALERT: test_contact;CANCELLED; Scheduled downtime ' 'for contact has been cancelled.'), ('info', 'CONTACT DOWNTIME ALERT: test_contact;CANCELLED; Scheduled downtime ' 'for contact has been cancelled.') ] self.check_monitoring_events_log(expected_logs, dump=True) def test_contactgroup(self): """ Test the commands for contacts groups :return: None """ #  A contact... contact = self._scheduler.contacts.find_by_name("test_contact") assert contact is not None #  A contactgroup ... contactgroup = self._scheduler.contactgroups.find_by_name( "test_contact") assert contactgroup is not None #  --- # External command: disable / enable notifications for a contacts group excmd = '[%d] DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;test_contact' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for contact_id in contactgroup.get_contacts(): assert not self._scheduler.contacts[contact_id].host_notifications_enabled excmd = '[%d] ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS;test_contact' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for contact_id in contactgroup.get_contacts(): assert self._scheduler.contacts[contact_id].host_notifications_enabled #  --- # External command: disable / enable passive checks for a contacts group excmd = '[%d] DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS;test_contact' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for contact_id in contactgroup.get_contacts(): assert not self._scheduler.contacts[contact_id].service_notifications_enabled excmd = '[%d] ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS;test_contact' % time.time() 
self._scheduler.run_external_commands([excmd]) self.external_command_loop() for contact_id in contactgroup.get_contacts(): assert self._scheduler.contacts[contact_id].service_notifications_enabled def test_hostgroup(self): """ Test the commands for hosts groups :return: None """ #  An host... host = self._scheduler.hosts.find_by_name("test_host_0") assert host is not None #  An hostrgoup... hostgroup = self._scheduler.hostgroups.find_by_name("allhosts") assert hostgroup is not None #  A service... svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") assert svc is not None now = int(time.time()) #  --- # External command: disable /enable checks for an hostgroup (hosts) excmd = '[%d] DISABLE_HOSTGROUP_HOST_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): assert not self._scheduler.hosts[host_id].active_checks_enabled excmd = '[%d] ENABLE_HOSTGROUP_HOST_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): assert self._scheduler.hosts[host_id].active_checks_enabled #  --- # External command: disable / enable notifications for an hostgroup (hosts) excmd = '[%d] DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): assert not self._scheduler.hosts[host_id].notifications_enabled excmd = '[%d] ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): assert self._scheduler.hosts[host_id].notifications_enabled #  --- # External command: disable / enable passive checks for an hostgroup (hosts) excmd = '[%d] DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in 
hostgroup.get_hosts(): assert not self._scheduler.hosts[host_id].passive_checks_enabled excmd = '[%d] ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): assert self._scheduler.hosts[host_id].passive_checks_enabled #  --- # External command: disable / enable passive checks for an hostgroup (services) excmd = '[%d] DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): if host_id in self._scheduler.hosts: for service_id in self._scheduler.hosts[host_id].services: assert not self._scheduler.services[service_id].passive_checks_enabled excmd = '[%d] ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): if host_id in self._scheduler.hosts: for service_id in self._scheduler.hosts[host_id].services: assert self._scheduler.services[service_id].passive_checks_enabled #  --- # External command: disable checks for an hostgroup (services) excmd = '[%d] DISABLE_HOSTGROUP_SVC_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): if host_id in self._scheduler.hosts: for service_id in self._scheduler.hosts[host_id].services: assert not self._scheduler.services[service_id].active_checks_enabled excmd = '[%d] ENABLE_HOSTGROUP_SVC_CHECKS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): if host_id in self._scheduler.hosts: for service_id in self._scheduler.hosts[host_id].services: assert self._scheduler.services[service_id].active_checks_enabled #  --- # External command: disable notifications for an hostgroup (services) excmd = '[%d] DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;allhosts' % now 
self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): if host_id in self._scheduler.hosts: for service_id in self._scheduler.hosts[host_id].services: assert not self._scheduler.services[service_id].notifications_enabled excmd = '[%d] ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;allhosts' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() for host_id in hostgroup.get_hosts(): if host_id in self._scheduler.hosts: for service_id in self._scheduler.hosts[host_id].services: assert self._scheduler.services[service_id].notifications_enabled #  --- # External command: add an host downtime assert host.downtimes == {} excmd = '[%d] SCHEDULE_HOSTGROUP_HOST_DOWNTIME;allhosts;%s;%s;1;0;1200;' \ 'test_contact;My downtime' \ % (now, now + 120, now + 1200) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(host.downtimes) == 1 for host_id in hostgroup.get_hosts(): host = self._scheduler.hosts[host_id] downtime_id = list(host.downtimes)[0] downtime = list(host.downtimes.values())[0] assert downtime.comment == "My downtime" assert downtime.author == "test_contact" assert downtime.start_time == now + 120 assert downtime.end_time == now + 1200 assert downtime.duration == 1080 assert downtime.fixed == True assert downtime.trigger_id == "0" #  --- # External command: add an host downtime excmd = '[%d] SCHEDULE_HOSTGROUP_SVC_DOWNTIME;allhosts;%s;%s;1;0;1200;' \ 'test_contact;My downtime' \ % (now, now + 120, now + 1200) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert len(host.downtimes) == 1 for host_id in hostgroup.get_hosts(): host = self._scheduler.hosts[host_id] for service_id in host.services: service = self._scheduler.services[service_id] downtime_id = list(host.downtimes)[0] downtime = list(host.downtimes.values())[0] assert downtime.comment == "My downtime" assert downtime.author == "test_contact" assert downtime.start_time == now 
+ 120 assert downtime.end_time == now + 1200 assert downtime.duration == 1080 assert downtime.fixed == True assert downtime.trigger_id == "0" def test_host(self): """ Test the commands for hosts :return: None """ #  An host... host = self._scheduler.hosts.find_by_name("test_host_0") assert host is not None #  A service... svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") assert svc.customs is not None #  --- # External command: disable / enable checks assert host.active_checks_enabled assert host.passive_checks_enabled assert svc.passive_checks_enabled excmd = '[%d] DISABLE_HOST_CHECK;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not host.active_checks_enabled # Not changed! assert host.passive_checks_enabled excmd = '[%d] ENABLE_HOST_CHECK;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert host.active_checks_enabled assert host.passive_checks_enabled excmd = '[%d] DISABLE_HOST_SVC_CHECKS;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not svc.active_checks_enabled # Not changed! 
assert svc.passive_checks_enabled excmd = '[%d] ENABLE_HOST_SVC_CHECKS;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert svc.active_checks_enabled assert svc.passive_checks_enabled #  --- # External command: disable / enable freshness check assert not host.check_freshness excmd = '[%d] ENABLE_HOST_FRESHNESS_CHECK;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert host.check_freshness excmd = '[%d] DISABLE_HOST_FRESHNESS_CHECK;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not host.check_freshness #  --- # External command: disable / enable event handlers assert host.event_handler_enabled excmd = '[%d] DISABLE_HOST_EVENT_HANDLER;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not host.event_handler_enabled excmd = '[%d] ENABLE_HOST_EVENT_HANDLER;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert host.event_handler_enabled #  --- # External command: disable / enable notifications assert host.notifications_enabled assert svc.notifications_enabled excmd = '[%d] DISABLE_HOST_NOTIFICATIONS;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not host.notifications_enabled excmd = '[%d] ENABLE_HOST_NOTIFICATIONS;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert host.notifications_enabled excmd = '[%d] DISABLE_HOST_SVC_NOTIFICATIONS;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not svc.notifications_enabled excmd = '[%d] ENABLE_HOST_SVC_NOTIFICATIONS;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert svc.notifications_enabled #  --- # 
External command: disable / enable checks assert host.flap_detection_enabled excmd = '[%d] DISABLE_HOST_FLAP_DETECTION;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not host.flap_detection_enabled excmd = '[%d] ENABLE_HOST_FLAP_DETECTION;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert host.flap_detection_enabled #  --- # External command: schedule host check excmd = '[%d] SCHEDULE_FORCED_HOST_CHECK;test_host_0;1000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() excmd = '[%d] SCHEDULE_FORCED_HOST_SVC_CHECKS;test_host_0;1000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() excmd = '[%d] SCHEDULE_HOST_CHECK;test_host_0;1000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() #  --- # External command: schedule host services checks excmd = '[%d] SCHEDULE_HOST_SVC_CHECKS;test_host_0;1000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() #  --- # External command: launch service event handler excmd = '[%d] LAUNCH_HOST_EVENT_HANDLER;test_host_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() def test_global_host_commands(self): """ Test global hosts commands :return: None """ #  --- # External command: disable / enable freshness checks for all hosts assert self._scheduler.external_commands_manager.my_conf.check_host_freshness excmd = '[%d] DISABLE_HOST_FRESHNESS_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.check_host_freshness excmd = '[%d] ENABLE_HOST_FRESHNESS_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 
self._scheduler.external_commands_manager.my_conf.check_host_freshness def test_servicegroup(self): """ Test the commands for hosts groups :return: None """ #  An host... host = self._scheduler.hosts.find_by_name("test_host_0") assert host is not None #  A servicegroup... servicegroup = self._scheduler.servicegroups.find_by_name("ok") assert servicegroup is not None #  A service... svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") assert svc is not None #  --- # External command: disable /enable checks for an servicegroup (hosts) excmd = '[%d] DISABLE_SERVICEGROUP_HOST_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): host_id = self._scheduler.services[service_id].host assert not self._scheduler.hosts[host_id].active_checks_enabled excmd = '[%d] ENABLE_SERVICEGROUP_HOST_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): host_id = self._scheduler.services[service_id].host assert self._scheduler.hosts[host_id].active_checks_enabled #  --- # External command: disable / enable notifications for an servicegroup (hosts) excmd = '[%d] DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): host_id = self._scheduler.services[service_id].host assert not self._scheduler.hosts[host_id].notifications_enabled excmd = '[%d] ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): host_id = self._scheduler.services[service_id].host assert self._scheduler.hosts[host_id].notifications_enabled #  --- # External command: disable / enable passive checks for an servicegroup (hosts) excmd = '[%d] 
DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): host_id = self._scheduler.services[service_id].host assert not self._scheduler.hosts[host_id].passive_checks_enabled excmd = '[%d] ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): host_id = self._scheduler.services[service_id].host assert self._scheduler.hosts[host_id].passive_checks_enabled #  --- # External command: disable / enable passive checks for an servicegroup (services) excmd = '[%d] DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): assert not self._scheduler.services[service_id].passive_checks_enabled excmd = '[%d] ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): assert self._scheduler.services[service_id].passive_checks_enabled #  --- # External command: disable checks for an servicegroup (services) excmd = '[%d] DISABLE_SERVICEGROUP_SVC_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): assert not self._scheduler.services[service_id].active_checks_enabled excmd = '[%d] ENABLE_SERVICEGROUP_SVC_CHECKS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): assert self._scheduler.services[service_id].active_checks_enabled #  --- # External command: disable notifications for an servicegroup (services) excmd = '[%d] DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;ok' % time.time() 
self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): assert not self._scheduler.services[service_id].notifications_enabled excmd = '[%d] ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;ok' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() for service_id in servicegroup.get_services(): assert self._scheduler.services[service_id].notifications_enabled def test_service(self): """ Test the commands for services :return: None """ #  An host... host = self._scheduler.hosts.find_by_name("test_host_0") assert host is not None #  A service... svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") assert svc.customs is not None #  --- # External command: disable / enable checks assert svc.active_checks_enabled assert svc.passive_checks_enabled assert svc.passive_checks_enabled excmd = '[%d] DISABLE_SVC_CHECK;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not svc.active_checks_enabled # Not changed! 
assert svc.passive_checks_enabled excmd = '[%d] ENABLE_SVC_CHECK;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert svc.active_checks_enabled assert svc.passive_checks_enabled #  --- # External command: disable / enable event handlers assert not svc.check_freshness excmd = '[%d] ENABLE_SVC_FRESHNESS_CHECK;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert svc.check_freshness excmd = '[%d] DISABLE_SVC_FRESHNESS_CHECK;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not svc.check_freshness #  --- # External command: disable / enable event handlers assert svc.event_handler_enabled excmd = '[%d] DISABLE_SVC_EVENT_HANDLER;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not svc.event_handler_enabled excmd = '[%d] ENABLE_SVC_EVENT_HANDLER;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert svc.event_handler_enabled #  --- # External command: disable / enable notifications assert svc.notifications_enabled assert svc.notifications_enabled excmd = '[%d] DISABLE_SVC_NOTIFICATIONS;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not svc.notifications_enabled excmd = '[%d] ENABLE_SVC_NOTIFICATIONS;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert svc.notifications_enabled #  --- # External command: disable / enable checks assert not svc.flap_detection_enabled excmd = '[%d] ENABLE_SVC_FLAP_DETECTION;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert svc.flap_detection_enabled excmd = '[%d] 
DISABLE_SVC_FLAP_DETECTION;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not svc.flap_detection_enabled #  --- # External command: schedule service check excmd = '[%d] SCHEDULE_FORCED_SVC_CHECK;test_host_0;test_ok_0;1000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() excmd = '[%d] SCHEDULE_SVC_CHECK;test_host_0;test_ok_0;1000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() #  --- # External command: launch service event handler excmd = '[%d] LAUNCH_SVC_EVENT_HANDLER;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() def test_global_service_commands(self): """ Test global hosts commands :return: None """ #  --- # External command: disable / enable freshness checks for all services assert self._scheduler.external_commands_manager.my_conf.check_service_freshness excmd = '[%d] DISABLE_SERVICE_FRESHNESS_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.check_service_freshness excmd = '[%d] ENABLE_SERVICE_FRESHNESS_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.check_service_freshness def test_global_commands(self): """ Test global hosts commands :return: None """ #  --- # External command: disable / enable performance data for all hosts assert self._scheduler.external_commands_manager.my_conf.enable_flap_detection excmd = '[%d] DISABLE_FLAP_DETECTION' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.enable_flap_detection excmd = '[%d] ENABLE_FLAP_DETECTION' % time.time() self._scheduler.run_external_commands([excmd]) 
self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.enable_flap_detection #  --- # External command: disable / enable performance data for all hosts assert self._scheduler.external_commands_manager.my_conf.process_performance_data excmd = '[%d] DISABLE_PERFORMANCE_DATA' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.process_performance_data excmd = '[%d] ENABLE_PERFORMANCE_DATA' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.process_performance_data #  --- # External command: disable / enable global ent handers assert self._scheduler.external_commands_manager.my_conf.enable_notifications excmd = '[%d] DISABLE_NOTIFICATIONS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.enable_notifications self._scheduler.external_commands_manager.my_conf.modified_attributes = 0 excmd = '[%d] ENABLE_NOTIFICATIONS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.enable_notifications #  --- # External command: disable / enable global ent handers assert self._scheduler.external_commands_manager.my_conf.enable_event_handlers excmd = '[%d] DISABLE_EVENT_HANDLERS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.enable_event_handlers self._scheduler.external_commands_manager.my_conf.modified_attributes = 0 excmd = '[%d] ENABLE_EVENT_HANDLERS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.enable_event_handlers #  --- # External command: disable / enable 
global active hosts checks assert self._scheduler.external_commands_manager.my_conf.execute_host_checks excmd = '[%d] STOP_EXECUTING_HOST_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.execute_host_checks self._scheduler.external_commands_manager.my_conf.modified_attributes = 0 excmd = '[%d] START_EXECUTING_HOST_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.execute_host_checks #  --- # External command: disable / enable global active services checks assert self._scheduler.external_commands_manager.my_conf.execute_service_checks excmd = '[%d] STOP_EXECUTING_SVC_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.execute_service_checks self._scheduler.external_commands_manager.my_conf.modified_attributes = 0 excmd = '[%d] START_EXECUTING_SVC_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.execute_service_checks #  --- # External command: disable / enable global passive hosts checks assert self._scheduler.external_commands_manager.my_conf.accept_passive_host_checks excmd = '[%d] STOP_ACCEPTING_PASSIVE_HOST_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.accept_passive_host_checks self._scheduler.external_commands_manager.my_conf.modified_attributes = 0 excmd = '[%d] START_ACCEPTING_PASSIVE_HOST_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.accept_passive_host_checks #  --- # External command: disable / enable global passive 
services checks assert self._scheduler.external_commands_manager.my_conf.accept_passive_service_checks excmd = '[%d] STOP_ACCEPTING_PASSIVE_SVC_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert not self._scheduler.external_commands_manager.my_conf.accept_passive_service_checks self._scheduler.external_commands_manager.my_conf.modified_attributes = 0 excmd = '[%d] START_ACCEPTING_PASSIVE_SVC_CHECKS' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert self._scheduler.external_commands_manager.my_conf.accept_passive_service_checks @pytest.mark.skip("Temporarily disable - ext commands refactoring needed!") def test_special_commands(self): """ Test the special external commands :return: None """ # Clear logs and broks self.clear_logs() self._main_broker.broks = [] now = int(time.time()) # RESTART_PROGRAM excmd = '[%d] RESTART_PROGRAM' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.assert_any_log_match( 'RESTART command : libexec/sleep_command.sh 3') # There is no log made by the script because the command is a shell script ! # self.assert_any_log_match('I awoke after sleeping 3 seconds') # We got 'monitoring_log' broks for logging to the monitoring logs... 
monitoring_logs = [] for brok in self._main_broker.broks: if brok.type == 'monitoring_log': data = unserialize(brok.data) monitoring_logs.append((data['level'], data['message'])) # todo: reactivate this when external commands are refactored # expected_logs = [ # ('info', 'EXTERNAL COMMAND: [%s] RESTART_PROGRAM' % now), # ('info', 'I awoke after sleeping 3 seconds | sleep=3\\n') # ] # for log_level, log_message in expected_logs: # log_message = log_message.rstrip() # assert (log_level, log_message) in monitoring_logs # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # RELOAD_CONFIG excmd = '[%d] RELOAD_CONFIG' % now self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.assert_any_log_match( 'RELOAD command : libexec/sleep_command.sh 2') # There is no log made by the script because the command is a shell script ! # self.assert_any_log_match('I awoke after sleeping 2 seconds') # We got 'monitoring_log' broks for logging to the monitoring logs... 
monitoring_logs = [] for brok in self._main_broker.broks: if brok.type == 'monitoring_log': data = unserialize(brok.data) monitoring_logs.append((data['level'], data['message'])) # todo: reactivate this when external commands are refactored # expected_logs = [ # (u'info', u'EXTERNAL COMMAND: [%s] RELOAD_CONFIG' % (now)), # (u'info', u'I awoke after sleeping 2 seconds | sleep=2\\n') # ] # for log_level, log_message in expected_logs: # assert (log_level, log_message) in monitoring_logs # Todo: we should also test those Alignak specific commands: # del_host_dependency, # add_simple_host_dependency, # add_simple_poller def test_not_implemented(self): """ Test the not implemented external commands :return: None """ # Clear logs and broks self.clear_logs() self._main_broker.broks = [] now = int(time.time()) excmd = '[%d] SHUTDOWN_PROGRAM' % (now) self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs = [ ('info', u'EXTERNAL COMMAND: [%s] SHUTDOWN_PROGRAM' % (now)), ('warning', u'SHUTDOWN_PROGRAM: this command is not implemented!') ] self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] now = int(time.time()) excmd = '[%d] SET_HOST_NOTIFICATION_NUMBER;test_host_0;0' % (now) self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') broks = [b for b in self._main_broker.broks if b.type == 'monitoring_log'] expected_logs = [ ('info', 'EXTERNAL COMMAND: [%s] SHUTDOWN_PROGRAM' % now), ('warning', 'SHUTDOWN_PROGRAM: this command is not implemented!'), ('info', 'EXTERNAL COMMAND: [%s] SET_HOST_NOTIFICATION_NUMBER;test_host_0;0' % now), ('warning', 'SET_HOST_NOTIFICATION_NUMBER: this command is not implemented!'), ] self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] now = int(time.time()) excmd = '[%d] SET_SVC_NOTIFICATION_NUMBER;test_host_0;test_ok_0;1' % ( now) 
self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] SET_SVC_NOTIFICATION_NUMBER;test_host_0;test_ok_0;1' % now), ('warning', 'SET_HOST_NOTIFICATION_NUMBER: this command is not implemented!') ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] now = int(time.time()) excmd = '[%d] SEND_CUSTOM_HOST_NOTIFICATION;test_host_0;100;' \ 'test_contact;My notification' % (now) self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] SET_SVC_NOTIFICATION_NUMBER;test_host_0;test_ok_0;1' % now), ('warning', 'SET_HOST_NOTIFICATION_NUMBER: this command is not implemented!') ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] now = int(time.time()) excmd = '[%d] SEND_CUSTOM_SVC_NOTIFICATION;test_host_0;test_ok_0;100;' \ 'test_contact;My notification' % (now) self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] SET_SVC_NOTIFICATION_NUMBER;test_host_0;test_ok_0;1' % now), ('warning', 'SET_HOST_NOTIFICATION_NUMBER: this command is not implemented!') ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] now = int(time.time()) excmd = '[%d] SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME;test_host_0;%s;%s;' \ '1;0;1200;test_contact;My downtime' % ( now, now + 120, now + 1200) self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;test_contact;My downtime' % ( now, now + 120, now + 1200)), ('warning', 'SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME: this command 
is not implemented!') ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] now = int(time.time()) excmd = '[%d] SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME;test_host_0;%s;%s;' \ '1;0;1200;test_contact;My downtime' % ( now, now + 120, now + 1200) self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME;test_host_0;%s;%s;1;0;1200;test_contact;My downtime' % ( now, now + 120, now + 1200)), ('warning', 'SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME: this command is not implemented!') ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] SAVE_STATE_INFORMATION' % now self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') # Clear broks self._main_broker.broks = [] excmd = '[%d] READ_STATE_INFORMATION' % int(time.time()) self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] SAVE_STATE_INFORMATION' % now), ('warning', 'SAVE_STATE_INFORMATION: this command is not implemented!'), ('info', 'EXTERNAL COMMAND: [%s] READ_STATE_INFORMATION' % now), ('warning', 'READ_STATE_INFORMATION: this command is not implemented!') ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] PROCESS_FILE;file;1' % now self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] PROCESS_FILE;file;1' % now), ('warning', 'PROCESS_FILE: this command is not implemented!'), ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] 
ENABLE_HOST_AND_CHILD_NOTIFICATIONS;test_host_0' % now self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] ENABLE_HOST_AND_CHILD_NOTIFICATIONS;test_host_0' % now), ('warning', 'ENABLE_HOST_AND_CHILD_NOTIFICATIONS: this command is not implemented!'), ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] DISABLE_HOST_AND_CHILD_NOTIFICATIONS;test_host_0' % now self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] DISABLE_HOST_AND_CHILD_NOTIFICATIONS;test_host_0' % now), ('warning', 'DISABLE_HOST_AND_CHILD_NOTIFICATIONS: this command is not implemented!'), ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST;test_host_0' % now self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST;test_host_0' % now), ('warning', 'DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST: this command is not implemented!'), ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST;test_host_0' % now self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST;test_host_0' % now), ('warning', 'ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST: this command is not implemented!'), ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] CHANGE_GLOBAL_HOST_EVENT_HANDLER;check-host-alive' % now 
self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] CHANGE_GLOBAL_HOST_EVENT_HANDLER;check-host-alive' % now), ('warning', 'CHANGE_GLOBAL_HOST_EVENT_HANDLER: this command is not implemented!'), ]) self.check_monitoring_events_log(expected_logs) # Clear broks self._main_broker.broks = [] excmd = '[%d] CHANGE_GLOBAL_SVC_EVENT_HANDLER;check-host-alive' % now self._scheduler.run_external_commands([excmd]) self.assert_any_log_match('is not currently implemented in Alignak') expected_logs.extend([ ('info', 'EXTERNAL COMMAND: [%s] CHANGE_GLOBAL_SVC_EVENT_HANDLER;check-host-alive' % now), ('warning', 'CHANGE_GLOBAL_SVC_EVENT_HANDLER: this command is not implemented!'), ]) self.check_monitoring_events_log(expected_logs)
class TestExternalCommands(AlignakTest): ''' This class tests the external commands ''' def setUp(self): pass def test_basic_external_command(self): ''' Basic tests for the external command :return: ''' pass def test__command_syntax_receiver(self): pass @pytest.mark.skip("Temporarily disable - ext commands refactoring needed!") def test__command_syntax_dispatcher(self): pass def test__command_syntax_applyer(self): pass def _command_syntax(self): ''' External command parsing - named as test__ to be the first executed test :) :return: None ''' pass def test_several_commands(self): ''' External command management - several commands at once :return: None ''' pass def test_change_and_reset_host_modattr(self): ''' Change and reset modified attributes for an host :return: None ''' pass def test_change_and_reset_service_modattr(self): ''' Change and reset modified attributes for a service :return: None ''' pass def test_change_and_reset_contact_modattr(self): ''' Change an Noned reset modified attributes for a contact :return: None ''' pass def test_change_host_attributes(self): ''' Change host attributes :return: None ''' pass def test_change_service_attributes(self): '''Change service attributes :return: None ''' pass def test_change_contact_attributes(self): ''' Change contact attributes :return: None ''' pass @freeze_time("2017-06-01 18:30:00") def test_host_comments(self): ''' Test the comments for hosts :return: None ''' pass @freeze_time("2017-06-01 18:30:00") def test_service_comments(self): ''' Test the comments for services :return: None ''' pass @freeze_time("2017-06-01 18:30:00") def test_host_acknowledges(self): ''' Test the acknowledges for hosts :return: None ''' pass @freeze_time("2017-06-01 18:30:00") def test_service_acknowledges(self): ''' Test the acknowledges for services :return: None ''' pass @freeze_time("2017-06-01 18:30:00") def test_host_downtimes_host_up(self): ''' Test the downtime for hosts - host is UP :return: None ''' pass def 
test_host_downtimes_host_down(self): ''' Test the downtime for hosts - host is DOWN :return: None ''' pass def test_host_downtimes_host_delete(self): ''' Test the downtime for hosts - host is DOWN - create and delete downtimes :return: None ''' pass def test_service_downtimes(self): ''' Test the downtimes for services :return: None ''' pass def test_contact_downtimes(self): ''' Test the downtime for hosts :return: None ''' pass def test_contactgroup(self): ''' Test the commands for contacts groups :return: None ''' pass def test_hostgroup(self): ''' Test the commands for hosts groups :return: None ''' pass def test_host_comments(self): ''' Test the commands for hosts :return: None ''' pass def test_global_host_commands(self): ''' Test global hosts commands :return: None ''' pass def test_servicegroup(self): ''' Test the commands for hosts groups :return: None ''' pass def test_service_comments(self): ''' Test the commands for services :return: None ''' pass def test_global_service_commands(self): ''' Test global hosts commands :return: None ''' pass def test_global_commands(self): ''' Test global hosts commands :return: None ''' pass @pytest.mark.skip("Temporarily disable - ext commands refactoring needed!") def test_special_commands(self): ''' Test the special external commands :return: None ''' pass def test_not_implemented(self): ''' Test the not implemented external commands :return: None ''' pass
40
29
88
9
61
18
3
0.31
1
6
1
0
32
2
32
87
2,885
332
1,958
182
1,918
601
1,532
172
1,499
28
2
3
89
3,917
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_external_commands_passive_checks.py
tests.test_external_commands_passive_checks.TestExternalCommandsPassiveChecks
class TestExternalCommandsPassiveChecks(AlignakTest): """ This class tests the external commands for passive checks """ def setUp(self): super(TestExternalCommandsPassiveChecks, self).setUp() self.setup_with_file('cfg/cfg_external_commands.cfg', dispatching=True) assert self.conf_is_correct # No error messages assert len(self.configuration_errors) == 0 # No warning messages self.show_configuration_logs() assert len(self.configuration_warnings) == 0 def test_passive_checks_active_passive(self): """ Test passive host/service checks as external commands Hosts and services are active/passive checks enabled :return: """ # Get host host = self._scheduler.hosts.find_by_name('test_host_0') host.checks_in_progress = [] host.event_handler_enabled = False host.active_checks_enabled = True host.passive_checks_enabled = True print(("Host: %s - state: %s/%s" % (host, host.state_type, host.state))) assert host is not None # Get dependent host router = self._scheduler.hosts.find_by_name("test_router_0") router.checks_in_progress = [] router.event_handler_enabled = False router.active_checks_enabled = True router.passive_checks_enabled = True print(("Router: %s - state: %s/%s" % (router, router.state_type, router.state))) assert router is not None # Get service svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # ignore the host which we depend of svc.event_handler_enabled = False svc.active_checks_enabled = True svc.passive_checks_enabled = True assert svc is not None print(("Service: %s - state: %s/%s" % (svc, svc.state_type, svc.state))) # Active checks to set an initial state # --------------------------------------------- # Set host as UP and its service as CRITICAL self.scheduler_loop(1, [[host, 0, 'Host is UP | value1=1 value2=2']]) self.assert_checks_count(2) self.show_checks() # Prepared a check for the service and the router self.assert_checks_match(0, 'test_hostcheck.pl', 'command') 
self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'servicedesc test_ok_0', 'command') assert 'UP' == host.state assert 'HARD' == host.state_type self.scheduler_loop( 1, [[svc, 2, 'Service is CRITICAL | value1=0 value2=0']]) self.assert_checks_count(2) self.show_checks() # Prepared a check for the host and the router self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_hostcheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') assert 'CRITICAL' == svc.state assert 'SOFT' == svc.state_type # Passive checks for hosts # --------------------------------------------- # Receive passive host check Down excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is UP' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[router, 0, 'Host is UP']]) assert 'DOWN' == host.state assert 'Host is UP' == host.output # Receive passive host check Unreachable excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;1;Host is Unreachable' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[router, 0, 'Host is UP']]) assert 'DOWN' == host.state assert 'Host is Unreachable' == host.output # Receive passive host check Up excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host.state assert 'Host is UP' == host.output # Passive checks with performance data # --------------------------------------------- # Now with performance data excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP|rtt=9999' % time.time() 
self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host.state assert 'Host is UP' == host.output assert 'rtt=9999' == host.perf_data # Now with full-blown performance data. Here we have to watch out: # Is a ";" a separator for the external command or is it # part of the performance data? excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host.state assert 'Host is UP' == host.output assert 'rtt=9999;5;10;0;10000' == host.perf_data # Passive checks for services # --------------------------------------------- # Receive passive service check Warning excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;1;Service is WARNING' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert 'Service is WARNING' == svc.output assert False == svc.problem_has_been_acknowledged # Acknowledge service excmd = '[%d] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;Acknowledge service' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert True == svc.problem_has_been_acknowledged # Remove acknowledge service excmd = '[%d] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert False == svc.problem_has_been_acknowledged # Receive passive service check Critical excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;2;Service is CRITICAL' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'CRITICAL' == svc.state assert 'Service is CRITICAL' == svc.output assert False == svc.problem_has_been_acknowledged # Acknowledge service excmd = '[%d] 
ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;Acknowledge service' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'CRITICAL' == svc.state assert True == svc.problem_has_been_acknowledged # Service is going ok ... excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;0;Service is OK' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'OK' == svc.state assert 'Service is OK' == svc.output # Acknowledge disappeared because service went OK assert False == svc.problem_has_been_acknowledged # Passive checks for hosts - special case # --------------------------------------------- # With timestamp in the past (before the last host check time!) # The check is ignored because too late in the past self.scheduler_loop(1, [[router, 0, 'Router is UP']]) router_last_check = router.last_chk past = router_last_check - 30 excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Router is Down' % past self._scheduler.run_external_commands([excmd]) self.external_command_loop() # Router did not changed state! assert 'UP' == router.state assert 'Router is UP' == router.output router_last_check = router.last_chk # With timestamp in the past (- 1 seconds) # The check is accepted because it is equal or after the last host check time.sleep(2) past = router_last_check excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Router is Down' % past self._scheduler.run_external_commands([excmd]) self.external_command_loop() # Router changed state! 
assert 'DOWN' == router.state assert 'Router is Down' == router.output assert router.last_chk == past # Now with crappy characters, like é excmd = u'[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Output contains crappy ' \ u'characters èàçé and spaces|rtt=9999' % int(time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert u'Output contains crappy characters èàçé and spaces' == router.output assert 'rtt=9999' == router.perf_data assert False == router.problem_has_been_acknowledged # Now with utf-8 encoded data excmd = u'[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Output contains crappy ' \ u'characters èàçé and spaces|rtt=9999' % int(time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert u'Output contains crappy characters èàçé and spaces' == router.output assert u'rtt=9999' == router.perf_data assert False == router.problem_has_been_acknowledged # Acknowledge router excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert True == router.problem_has_been_acknowledged # Remove acknowledge router excmd = '[%d] REMOVE_HOST_ACKNOWLEDGEMENT;test_router_0' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert False == router.problem_has_been_acknowledged # Router is Down excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Router is Down' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert 'Router is Down' == router.output # Acknowledge router excmd = '[%d] 
ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert True == router.problem_has_been_acknowledged # Router is now Up excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;0;Router is Up' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == router.state assert 'Router is Up' == router.output # Acknowledge disappeared because host went OK assert False == router.problem_has_been_acknowledged def test_passive_checks_only_passively_checked(self): """ Test passive host/service checks as external commands Hosts and services are only passive checks enabled :return: """ # Get host host = self._scheduler.hosts.find_by_name('test_host_0') host.checks_in_progress = [] host.event_handler_enabled = False host.active_checks_enabled = True host.passive_checks_enabled = True print(("Host: %s - state: %s/%s" % (host, host.state_type, host.state))) assert host is not None # Get dependent host router = self._scheduler.hosts.find_by_name("test_router_0") router.checks_in_progress = [] router.event_handler_enabled = False router.active_checks_enabled = True router.passive_checks_enabled = True print(("Router: %s - state: %s/%s" % (router, router.state_type, router.state))) assert router is not None # Get service svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.event_handler_enabled = False svc.active_checks_enabled = True svc.passive_checks_enabled = True assert svc is not None print(("Service: %s - state: %s/%s" % (svc, svc.state_type, svc.state))) # Passive checks for hosts # --------------------------------------------- # Receive passive host check Down excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is DOWN' % time.time() 
self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[router, 0, 'Host is UP']]) assert 'DOWN' == host.state assert 'Host is DOWN' == host.output # Receive passive host check Unreachable excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;1;Host is Unreachable' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[router, 0, 'Router is UP']]) assert 'DOWN' == host.state assert 'Host is Unreachable' == host.output router_last_check = router.last_chk # Receive passive host check Up excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host.state assert 'Host is UP' == host.output # Passive checks with performance data # --------------------------------------------- # Now with performance data excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP|rtt=9999' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host.state assert 'Host is UP' == host.output assert 'rtt=9999' == host.perf_data # Now with full-blown performance data. Here we have to watch out: # Is a ";" a separator for the external command or is it # part of the performance data? 
excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host.state assert 'Host is UP' == host.output assert 'rtt=9999;5;10;0;10000' == host.perf_data # Passive checks for services # --------------------------------------------- # Receive passive service check Warning excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;1;' \ 'Service is WARNING' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[host, 0, 'Host is UP']]) assert 'WARNING' == svc.state assert 'Service is WARNING' == svc.output assert False == svc.problem_has_been_acknowledged # Acknowledge service excmd = '[%d] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;' \ 'Acknowledge service' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert True == svc.problem_has_been_acknowledged # Remove acknowledge service excmd = '[%d] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert False == svc.problem_has_been_acknowledged # Receive passive service check Critical excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;2;' \ 'Service is CRITICAL' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[host, 0, 'Host is UP']]) assert 'CRITICAL' == svc.state assert 'Service is CRITICAL' == svc.output assert False == svc.problem_has_been_acknowledged # Acknowledge service excmd = '[%d] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;' \ 'Acknowledge service' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'CRITICAL' == svc.state assert True == 
svc.problem_has_been_acknowledged # Service is going ok ... excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;0;' \ 'Service is OK|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'OK' == svc.state assert 'Service is OK' == svc.output assert 'rtt=9999;5;10;0;10000' == svc.perf_data # Acknowledge disappeared because service went OK assert False == svc.problem_has_been_acknowledged # Passive checks for hosts - special case # --------------------------------------------- # With timestamp in the past (before the last host check time!) # The check is ignored because too late in the past past = router_last_check - 30 excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Router is Down' % past self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == router.state assert 'Router is UP' == router.output # With timestamp in the past (- 1 seconds) # The check is accepted because it is equal or after the last host check time.sleep(2) past = router_last_check excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Router is Down' % past self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert 'Router is Down' == router.output assert router.last_chk == past # With timestamp in the past (- 3600 seconds) # The check is not be accepted very_past = int(time.time() - 3600) excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;0;Router is Up' % very_past self._scheduler.run_external_commands([excmd]) self.external_command_loop() # Router do not change state! 
assert 'DOWN' == router.state assert 'Router is Down' == router.output assert router.last_chk == past # Now with crappy characters, like é excmd = u'[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Output contains crappy ' \ u'character èàçé and spaces|rtt=9999' % int(time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert u'Output contains crappy character èàçé and spaces' == router.output assert 'rtt=9999' == router.perf_data assert False == router.problem_has_been_acknowledged # Now with utf-8 data excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Output contains crappy ' \ 'characters èàçé and spaces|rtt=9999' % int(time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert u'Output contains crappy characters èàçé and spaces' == router.output assert u'rtt=9999' == router.perf_data assert False == router.problem_has_been_acknowledged # Acknowledge router excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert True == router.problem_has_been_acknowledged # Remove acknowledge router excmd = '[%d] REMOVE_HOST_ACKNOWLEDGEMENT;test_router_0' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert False == router.problem_has_been_acknowledged # Router is Down excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Router is Down' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert 'Router is Down' == router.output # TODO: to be confirmed ... 
host should be unreachable because of its dependency with router # self.assertEqual('DOWN', host.state) # self.assertEqual('Router is Down', router.output) # self.assertEqual(router.last_chk, past) # Acknowledge router excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert True == router.problem_has_been_acknowledged # Router is now Up excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;0;Router is Up' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == router.state assert 'Router is Up' == router.output # Acknowledge disappeared because host went OK assert False == router.problem_has_been_acknowledged @pytest.mark.skip("Currently disabled - to be refactored!") def test_unknown_check_result_command_scheduler(self): """ Unknown check results commands managed by the scheduler :return: """ # ----- first part # ----- # Our scheduler External Commands Manager DOES ACCEPT unknown passive checks... # self._scheduler.cur_conf.accept_passive_unknown_check_results = True self._scheduler.external_commands_manager.accept_passive_unknown_check_results = True # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The scheduler receives a known host but unknown service service_check_result excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;unknownservice;1;' \ 'Service is WARNING|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) # We get an 'unknown_service_check_result'... 
broks = [] # Broks from my scheduler brokers for broker_link_uuid in self._scheduler.my_daemon.brokers: broks.extend( [b for b in self._scheduler.my_daemon.brokers[broker_link_uuid].broks]) for b in broks: print("Brok: %s" % b) broks = [b for b in broks if b.type == 'unknown_service_check_result'] assert len(broks) == 1 # ...but no logs assert 0 == self.count_logs() # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The scheduler receives and unknown host and service service_check_result excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;unknownhost;unknownservice;1;' \ 'Service is WARNING|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) # We get an 'unknown_service_check_result'... broks = [b for b in self._main_broker.broks if b.type == 'unknown_service_check_result'] assert len(broks) == 1 # ...but no logs assert 0 == self.count_logs() # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The scheduler receives an unknown host host_check_result excmd = '[%d] PROCESS_HOST_CHECK_RESULT;unknownhost;' \ '1;Host is UP|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) # A brok... broks = [b for b in self._main_broker.broks if b.type == 'unknown_host_check_result'] assert len(broks) == 1 # ...but no logs assert 0 == self.count_logs() # ----- second part # Our scheduler External Commands Manager DOES NOT ACCEPT unknown passive checks... # self._scheduler.cur_conf.accept_passive_unknown_check_results = False self._scheduler.external_commands_manager.accept_passive_unknown_check_results = False # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The scheduler receives a known host but unknown service service_check_result excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;unknownservice;1;' \ 'Service is WARNING|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) # No brok... 
broks = [b for b in self._main_broker.broks if b.type == 'unknown_service_check_result'] assert len(broks) == 0 # ...but a log self.show_logs() self.assert_log_match( 'A command was received for the service .* on host .*, ' 'but the service could not be found!') # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The scheduler receives an unknown host and service service_check_result excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;unknownhost;unknownservice;1;' \ 'Service is WARNING|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) # No brok... broks = [b for b in self._main_broker.broks if b.type == 'unknown_service_check_result'] assert len(broks) == 0 # ...but a log self.show_logs() self.assert_log_match( 'A command was received for the service .* on host .*, ' 'but the service could not be found!') # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The scheduler receives an unknown host host_check_result excmd = '[%d] PROCESS_HOST_CHECK_RESULT;unknownhost;' \ '1;Host is UP|rtt=9999;5;10;0;10000' % time.time() self._scheduler.run_external_commands([excmd]) # No brok... broks = [b for b in self._main_broker.broks if b.type == 'unknown_host_check_result'] assert len(broks) == 0 # ...but a log self.show_logs() self.assert_log_match( 'A command was received for the host .*, ' 'but the host could not be found!') @pytest.mark.skip("Currently disabled - to be refactored!") def test_unknown_check_result_command_receiver(self): """ Unknown check results commands managed by the receiver :return: """ # ----- first part # Our receiver External Commands Manager DOES ACCEPT unknown passive checks... # This is to replace the normal setup_new_conf ... 
self._receiver.accept_passive_unknown_check_results = True # Now create the external commands manager # We are a receiver: our role is to get and dispatch commands to the schedulers self._receiver.external_commands_manager = \ ExternalCommandManager(None, 'receiver', self._receiver_daemon, self._receiver.accept_passive_unknown_check_results) # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The receiver receives an unknown service external command excmd = ExternalCommand('[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;unknownservice;' '1;Service is WARNING|rtt=9999;5;10;0;10000' % time.time()) # This will simply push te commands to the schedulers ... self._receiver_daemon.unprocessed_external_commands.append(excmd) self._receiver_daemon.push_external_commands_to_schedulers() self.external_command_loop() broks = [] # Broks from my scheduler brokers for broker_link_uuid in self._scheduler.my_daemon.brokers: print(("Broker: %s" % self._scheduler.my_daemon.brokers[broker_link_uuid])) broks.extend([b for b in list( self._scheduler.my_daemon.brokers[broker_link_uuid].broks.values())]) for b in broks: print(("Brok: %s" % b)) # for brok in self._main_broker.broks, key=lambda x: x.creation_time): # print("Brok: %s" % brok) for brok in sorted(list(self._receiver_daemon.broks.values()), key=lambda x: x.creation_time): print("--Brok: %s" % brok) broks = [b for b in broks if b.type == 'unknown_service_check_result'] assert len(broks) == 1 # ...but no logs! self.show_logs() self.assert_no_log_match('Passive check result was received for host .*, ' 'but the host could not be found!') # ----- second part # Our receiver External Commands Manager DOES NOT ACCEPT unknown passive checks... # This is to replace the normal setup_new_conf ... 
self._receiver.accept_passive_unknown_check_results = False self._receiver.external_commands_manager.accept_passive_unknown_check_results = False # Clear logs and broks self.clear_logs() self._main_broker.broks = [] # The receiver receives an unknown service external command excmd = ExternalCommand('[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;unknownservice;' '1;Service is WARNING|rtt=9999;5;10;0;10000' % time.time()) self._receiver.unprocessed_external_commands.append(excmd) self._receiver.push_external_commands_to_schedulers() # No brok... broks = [b for b in self._main_broker.broks if b.type == 'unknown_service_check_result'] assert len(broks) == 0 # ...but a log self.show_logs() self.assert_any_log_match("External command was received for host 'test_host_0', " "but the host could not be found!") def test_unknown_check_result_brok(self): """ Unknown check results commands in broks :return: """ # unknown_host_check_result_brok excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is UP' expected = { 'time_stamp': 1234567890, 'return_code': '2', 'host_name': 'test_host_0', 'output': 'Host is UP', 'perf_data': None } brok = ExternalCommandManager.get_unknown_check_result_brok(excmd) print("Brok: %s" % brok) # the prepare method returns the brok data assert expected == brok.prepare() # unknown_host_check_result_brok with perfdata excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is UP|rtt=9999' expected = { 'time_stamp': 1234567890, 'return_code': '2', 'host_name': 'test_host_0', 'output': 'Host is UP', 'perf_data': 'rtt=9999' } brok = ExternalCommandManager.get_unknown_check_result_brok(excmd) assert expected == brok.prepare() # unknown_service_check_result_brok excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;host-checked;0;Everything OK' expected = { 'time_stamp': 1234567890, 'return_code': '0', 'host_name': 'host-checked', 'output': 'Everything OK', 'perf_data': None } brok = 
ExternalCommandManager.get_unknown_check_result_brok(excmd) assert expected == brok.prepare() # unknown_service_check_result_brok with perfdata excmd = '[1234567890] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;1;Service is WARNING|rtt=9999;5;10;0;10000' expected = { 'host_name': 'test_host_0', 'time_stamp': 1234567890, 'service_description': 'test_ok_0', 'return_code': '1', 'output': 'Service is WARNING', 'perf_data': 'rtt=9999;5;10;0;10000' } brok = ExternalCommandManager.get_unknown_check_result_brok(excmd) assert expected == brok.prepare() def test_services_acknowledge(self): """ Test services acknowledge :return: """ # Get host host = self._scheduler.hosts.find_by_name('test_host_0') host.checks_in_progress = [] host.event_handler_enabled = False host.active_checks_enabled = True host.passive_checks_enabled = True print(("Host: %s - state: %s/%s" % (host, host.state_type, host.state))) assert host is not None # Get dependent host router = self._scheduler.hosts.find_by_name("test_router_0") router.checks_in_progress = [] router.event_handler_enabled = False router.active_checks_enabled = True router.passive_checks_enabled = True print(("Router: %s - state: %s/%s" % (router, router.state_type, router.state))) assert router is not None # Get service svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.event_handler_enabled = False svc.active_checks_enabled = True svc.passive_checks_enabled = True assert svc is not None print(("Service: %s - state: %s/%s" % (svc, svc.state_type, svc.state))) # Passive checks for services # --------------------------------------------- # Receive passive service check Warning excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;1;Service is WARNING' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[host, 0, 'Host is UP']]) assert 'WARNING' == svc.state assert 'Service is WARNING' == 
svc.output assert False == svc.problem_has_been_acknowledged # Acknowledge service excmd = '[%d] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;Acknowledge service' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert True == svc.problem_has_been_acknowledged # Add a comment excmd = '[%d] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;Acknowledge service' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert True == svc.problem_has_been_acknowledged # Remove acknowledge service excmd = '[%d] REMOVE_SVC_ACKNOWLEDGEMENT;test_host_0;test_ok_0' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'WARNING' == svc.state assert False == svc.problem_has_been_acknowledged # Receive passive service check Critical excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;2;Service is CRITICAL' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[host, 0, 'Host is UP']]) assert 'CRITICAL' == svc.state assert 'Service is CRITICAL' == svc.output assert False == svc.problem_has_been_acknowledged # Acknowledge service excmd = '[%d] ACKNOWLEDGE_SVC_PROBLEM;test_host_0;test_ok_0;2;1;1;Big brother;Acknowledge service' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'CRITICAL' == svc.state assert True == svc.problem_has_been_acknowledged # Service is going ok ... 
excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;0;Service is OK' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'OK' == svc.state assert 'Service is OK' == svc.output # Acknowledge disappeared because service went OK assert False == svc.problem_has_been_acknowledged def test_hosts_checks(self): """ Test hosts checks :return: """ # Get host host = self._scheduler.hosts.find_by_name('test_host_0') host.checks_in_progress = [] host.act_depend_of = [] # ignore the router which we depend of host.event_handler_enabled = False print(("Host: %s - state: %s/%s" % (host, host.state_type, host.state))) assert host is not None # Get dependent host router = self._scheduler.hosts.find_by_name("test_router_0") router.checks_in_progress = [] router.event_handler_enabled = False print(("Router: %s - state: %s/%s" % (router, router.state_type, router.state))) assert router is not None # Get service svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.event_handler_enabled = False svc.active_checks_enabled = True svc.passive_checks_enabled = True assert svc is not None print(("Service: %s - state: %s/%s" % (svc, svc.state_type, svc.state))) # Passive checks for hosts - active only checks # ------------------------------------------------ host.active_checks_enabled = True host.passive_checks_enabled = False # Disabled router.active_checks_enabled = True router.passive_checks_enabled = False # Disabled # Host is DOWN # Set active host as DOWN self.scheduler_loop(1, [[host, 2, 'Host is DOWN']]) # excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is DOWN' % int(time.time()) # self.schedulers['scheduler-master'].sched.run_external_command(excmd) # self.external_command_loop() # New checks: test host, dependent host and service (because active checks are enabled) self.assert_checks_count(2) self.show_checks() self.assert_checks_match(0, 
'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') assert 'DOWN' == host.state assert 'Host is DOWN' == host.output assert False == host.problem_has_been_acknowledged # Host is UP # Set active host as DOWN self.scheduler_loop(1, [[host, 0, 'Host is UP']]) # excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP' % int(time.time()) # self.schedulers['scheduler-master'].sched.run_external_command(excmd) # self.external_command_loop() # New checks: test dependent host and service (because active checks are enabled) self.show_checks() self.assert_checks_count(2) self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') assert 'UP' == host.state assert 'Host is UP' == host.output assert False == host.problem_has_been_acknowledged # Passive checks for hosts - active/passive checks # ------------------------------------------------ host.active_checks_enabled = True host.passive_checks_enabled = True router.active_checks_enabled = True router.passive_checks_enabled = True # Host is DOWN excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is DOWN' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() # New checks: test dependent host and service (because active checks are enabled) self.show_checks() self.assert_checks_count(2) self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'servicedesc test_ok_0', 'command') assert 'DOWN' == 
host.state assert 'Host is DOWN' == host.output assert False == host.problem_has_been_acknowledged # Host is UP excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() # New checks: test dependent host and service (because active checks are enabled) self.show_checks() self.assert_checks_count(2) self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'servicedesc test_ok_0', 'command') assert 'UP' == host.state assert 'Host is UP' == host.output assert False == host.problem_has_been_acknowledged # Passive checks for hosts - passive only checks # ------------------------------------------------ # TODO: For hosts that are only passively checked, the scheduler should not create # new checks for the dependent services and should only create a check for an host # which we depend upon if this host is not only passively checked ! # It does not seem logical to try checking actively elements that are passive only! 
host.active_checks_enabled = False # Disabled host.passive_checks_enabled = True router.active_checks_enabled = False # Disabled router.passive_checks_enabled = True # Host is DOWN excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is DOWN' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() # New checks: test dependent host and service (because active checks are enabled) # Should not have new checks scheduled because the host is only passively checked self.show_checks() self.assert_checks_count(2) self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'servicedesc test_ok_0', 'command') assert 'DOWN' == host.state assert 'Host is DOWN' == host.output assert False == host.problem_has_been_acknowledged # Host is UP excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;0;Host is UP' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() # New checks: test dependent host and service (because active checks are enabled) self.assert_checks_count(2) self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'servicedesc test_ok_0', 'command') assert 'UP' == host.state assert 'Host is UP' == host.output assert False == host.problem_has_been_acknowledged def test_hosts_acknowledge(self): """ Test hosts acknowledge :return: """ # Get host host = self._scheduler.hosts.find_by_name('test_host_0') host.checks_in_progress = [] host.event_handler_enabled = False host.active_checks_enabled = True host.passive_checks_enabled = True print(("Host: %s - state: 
%s/%s" % (host, host.state_type, host.state))) assert host is not None # Get dependent host router = self._scheduler.hosts.find_by_name("test_router_0") router.checks_in_progress = [] router.event_handler_enabled = False router.active_checks_enabled = True router.passive_checks_enabled = True print(("Router: %s - state: %s/%s" % (router, router.state_type, router.state))) assert router is not None # Get service svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.event_handler_enabled = False svc.active_checks_enabled = True svc.passive_checks_enabled = True assert svc is not None print(("Service: %s - state: %s/%s" % (svc, svc.state_type, svc.state))) # Passive checks for hosts - special case # --------------------------------------------- # Host is DOWN excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Host is DOWN' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.show_checks() self.assert_checks_count(2) self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'servicedesc test_ok_0', 'command') assert 'DOWN' == router.state assert 'Host is DOWN' == router.output assert False == router.problem_has_been_acknowledged # Acknowledge router excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert True == router.problem_has_been_acknowledged # Remove acknowledge router excmd = '[%d] REMOVE_HOST_ACKNOWLEDGEMENT;test_router_0' % int( time.time()) self._scheduler.run_external_commands([excmd]) 
self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert False == router.problem_has_been_acknowledged # Host is DOWN excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Host is DOWN' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert 'Host is DOWN' == router.output assert False == router.problem_has_been_acknowledged # Acknowledge router excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert True == router.problem_has_been_acknowledged # Remove acknowledge router excmd = '[%d] REMOVE_HOST_ACKNOWLEDGEMENT;test_router_0' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert False == router.problem_has_been_acknowledged # Router is Down excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;2;Router is Down' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'DOWN' == router.state assert 'Router is Down' == router.output # Acknowledge router excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_router_0;2;1;1;Big brother;test' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", router.state, router.problem_has_been_acknowledged) assert 'DOWN' == router.state assert True == router.problem_has_been_acknowledged # Router is now Up excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_router_0;0;Router is Up' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == router.state assert 'Router is Up' == 
router.output # Acknowledge disappeared because host went OK assert False == router.problem_has_been_acknowledged def test_hosts_services_acknowledge(self): """ Test hosts with some attached services acknowledge :return: """ # Get host host = self._scheduler.hosts.find_by_name('test_host_0') host.checks_in_progress = [] host.act_depend_of = [] host.event_handler_enabled = False host.active_checks_enabled = True host.passive_checks_enabled = True print(("Host: %s - state: %s/%s" % (host, host.state_type, host.state))) assert host is not None # Get service svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.event_handler_enabled = False svc.active_checks_enabled = True svc.passive_checks_enabled = True assert svc is not None print(("Service: %s - state: %s/%s" % (svc, svc.state_type, svc.state))) # Passive checks for the host and its service # --------------------------------------------- # Service is WARNING excmd = '[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;1;Service is WARNING' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.scheduler_loop(1, [[host, 0, 'Host is UP']]) assert 'WARNING' == svc.state assert 'Service is WARNING' == svc.output # The service is not acknowledged assert False == svc.problem_has_been_acknowledged # Host is DOWN excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Host is DOWN' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() self.show_checks() self.assert_checks_count(2) self.assert_checks_match(0, 'test_hostcheck.pl', 'command') self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'test_servicecheck.pl', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') self.assert_checks_match(1, 'servicedesc test_ok_0', 'command') assert 'DOWN' == host.state assert 'Host is DOWN' == host.output assert False == 
host.problem_has_been_acknowledged # Acknowledge router excmd = '[%d] ACKNOWLEDGE_HOST_PROBLEM;test_host_0;2;1;1;Big brother;test' % int( time.time()) self._scheduler.run_external_commands([excmd]) self.external_command_loop() print("Host state", host.state, host.problem_has_been_acknowledged) assert 'DOWN' == host.state assert True == host.problem_has_been_acknowledged print("Service state", svc.state, svc.problem_has_been_acknowledged) assert 'WARNING' == svc.state # The service has also been acknowledged! assert True == svc.problem_has_been_acknowledged
class TestExternalCommandsPassiveChecks(AlignakTest): ''' This class tests the external commands for passive checks ''' def setUp(self): pass def test_passive_checks_active_passive(self): ''' Test passive host/service checks as external commands Hosts and services are active/passive checks enabled :return: ''' pass def test_passive_checks_only_passively_checked(self): ''' Test passive host/service checks as external commands Hosts and services are only passive checks enabled :return: ''' pass @pytest.mark.skip("Currently disabled - to be refactored!") def test_unknown_check_result_command_scheduler(self): ''' Unknown check results commands managed by the scheduler :return: ''' pass @pytest.mark.skip("Currently disabled - to be refactored!") def test_unknown_check_result_command_receiver(self): ''' Unknown check results commands managed by the receiver :return: ''' pass def test_unknown_check_result_brok(self): ''' Unknown check results commands in broks :return: ''' pass def test_services_acknowledge(self): ''' Test services acknowledge :return: ''' pass def test_hosts_checks(self): ''' Test hosts checks :return: ''' pass def test_hosts_acknowledge(self): ''' Test hosts acknowledge :return: ''' pass def test_hosts_services_acknowledge(self): ''' Test hosts with some attached services acknowledge :return: ''' pass
13
10
112
11
77
25
2
0.33
1
5
2
0
10
0
10
65
1,138
121
768
51
755
255
718
49
707
4
2
1
15
3,918
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_monitor.py
tests.test_monitor.TestMonitor
class TestMonitor(AlignakTest): """ This class tests the dispatcher (distribute configuration to satellites) """ def setUp(self): """Test starting""" super(TestMonitor, self).setUp() # Log at DEBUG level self.set_unit_tests_logger_level() def _monitoring(self, env_filename='tests/cfg/monitor/simple.ini', loops=3, multi_realms=False): """ monitoring process: prepare, check, dispatch This function realize all the monitoring operations: - load a monitoring configuration - prepare the monitoring - dispatch - check the correct monitoring, including: - check the configuration dispatched to the schedulers - check the configuration dispatched to the spare arbiter (if any) - run the check_reachable loop several times if multi_realms is True, the scheduler configuration received are not checked against the arbiter whole configuration. This would be really too complex to assert on this :( Schedulers must have a port number with 7768 (eg. 7768,17768,27768,...) Spare daemons must have a port number with 8770 (eg. 8770,18770,28770,...) 
:return: None """ args = { 'env_file': env_filename, 'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master' } my_arbiter = Arbiter(**args) my_arbiter.setup_alignak_logger() # Clear logs self.clear_logs() # my_arbiter.load_modules_manager() my_arbiter.load_monitoring_config_file() assert my_arbiter.conf.conf_is_correct is True # #1 - Get a new dispatcher my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself) my_arbiter.dispatcher = my_dispatcher print("*** All daemons WS: %s" % ["%s:%s" % (link.address, link.port) for link in my_dispatcher.all_daemons_links]) assert my_arbiter.alignak_monitor == "http://super_alignak:7773/ws" assert my_arbiter.alignak_monitor_username == 'admin' assert my_arbiter.alignak_monitor_password == 'admin' metrics = [] for type in sorted(my_arbiter.conf.types_creations): _, _, strclss, _, _ = my_arbiter.conf.types_creations[type] if strclss in ['hostescalations', 'serviceescalations']: continue objects_list = getattr(my_arbiter.conf, strclss, []) metrics.append("'%s'=%d" % (strclss, len(objects_list))) # Simulate the daemons HTTP interface (very simple simulation !) with requests_mock.mock() as mr: mr.post('%s/login' % (my_arbiter.alignak_monitor), json={ "_status": "OK", "_result": ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"] }) mr.patch('%s/host' % (my_arbiter.alignak_monitor), json={ "_status": "OK", "_result": ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"] }) # Time warp 5 seconds - overpass the ping period... self.clear_logs() # frozen_datetime.tick(delta=datetime.timedelta(seconds=5)) my_arbiter.get_alignak_status(details=False) self.show_logs() # Hack the requests history to check and simulate the configuration pushed... history = mr.request_history for index, request in enumerate(history): # Check what is patched on /host ... 
if 'host' in request.url: received = request.json() print((index, request.url, received)) from pprint import pprint pprint(received) assert received['name'] == 'My Alignak' assert received['livestate']['timestamp'] == 1519583400 assert received['livestate']['state'] == 'up' assert received['livestate']['output'] == 'Some of my daemons are not reachable.' for metric in metrics: assert metric in received['livestate']['perf_data'] print(received['livestate']['long_output']) # Long output is sorted by daemon name assert received['livestate']['long_output'] == \ u'broker-master - daemon is not reachable.\n' \ u'poller-master - daemon is not reachable.\n' \ u'reactionner-master - daemon is not reachable.\n' \ u'receiver-master - daemon is not reachable.\n' \ u'scheduler-master - daemon is not reachable.' for link in my_dispatcher.all_daemons_links: assert link.name in [service['name'] for service in received['services']] for service in received['services']: assert 'name' in service assert 'livestate' in service assert 'timestamp' in service['livestate'] assert 'state' in service['livestate'] assert 'output' in service['livestate'] assert 'long_output' in service['livestate'] assert 'perf_data' in service['livestate'] @freeze_time("2018-02-25 18:30:00") def test_monitoring_simple(self): """ Test the monitoring process: simple configuration :return: None """ self._monitoring() @pytest.mark.skip("Only for local tests ... 
directly send information to a monitor host.") def test_real(self): args = { 'env_file': os.path.join(self._test_dir, 'cfg/monitor/simple.ini'), 'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master' } my_arbiter = Arbiter(**args) my_arbiter.setup_alignak_logger() # Clear logs self.clear_logs() my_arbiter.alignak_monitor = "http://alignak-mos-ws.kiosks.ipmfrance.com" my_arbiter.alignak_monitor_username = 'admin' my_arbiter.alignak_monitor_password = 'ipm-France2017' # my_arbiter.load_modules_manager() my_arbiter.load_monitoring_config_file() assert my_arbiter.conf.conf_is_correct is True # #1 - Get a new dispatcher my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself) my_arbiter.dispatcher = my_dispatcher print("*** All daemons WS: %s" % ["%s:%s" % (link.address, link.port) for link in my_dispatcher.all_daemons_links]) my_arbiter.push_passive_check(details=False)
class TestMonitor(AlignakTest): ''' This class tests the dispatcher (distribute configuration to satellites) ''' def setUp(self): '''Test starting''' pass def _monitoring(self, env_filename='tests/cfg/monitor/simple.ini', loops=3, multi_realms=False): ''' monitoring process: prepare, check, dispatch This function realize all the monitoring operations: - load a monitoring configuration - prepare the monitoring - dispatch - check the correct monitoring, including: - check the configuration dispatched to the schedulers - check the configuration dispatched to the spare arbiter (if any) - run the check_reachable loop several times if multi_realms is True, the scheduler configuration received are not checked against the arbiter whole configuration. This would be really too complex to assert on this :( Schedulers must have a port number with 7768 (eg. 7768,17768,27768,...) Spare daemons must have a port number with 8770 (eg. 8770,18770,28770,...) :return: None ''' pass @freeze_time("2018-02-25 18:30:00") def test_monitoring_simple(self): ''' Test the monitoring process: simple configuration :return: None ''' pass @pytest.mark.skip("Only for local tests ... directly send information to a monitor host.") def test_real(self): pass
7
4
38
7
23
8
3
0.36
1
4
2
0
4
0
4
59
161
30
96
23
88
35
71
20
65
8
2
4
11
3,919
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_multibroker.py
tests.test_multibroker.TestMultibroker
class TestMultibroker(AlignakTest): """ This class test the multibroker in schedulers """ def setUp(self): super(TestMultibroker, self).setUp() def test_multibroker_onesched(self): """ Test with 2 brokers and 1 scheduler :return: None """ self.setup_with_file('cfg/multibroker/cfg_multi_broker_one_scheduler.cfg', dispatching=True) my_scheduler = self._scheduler assert 2 == len(my_scheduler.my_daemon.brokers) # create broks host = my_scheduler.pushed_conf.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = my_scheduler.pushed_conf.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no raised host check on critical service check result self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) # Count broks in each broker broker_broks_count = {} broker1_link_uuid = None broker2_link_uuid = None for broker_link_uuid in my_scheduler.my_daemon.brokers: if my_scheduler.my_daemon.brokers[broker_link_uuid].name == 'broker-master': broker1_link_uuid = broker_link_uuid else: broker2_link_uuid = broker_link_uuid broker_broks_count[my_scheduler.my_daemon.brokers[broker_link_uuid].name] = 0 print(("Broker %s:" % (my_scheduler.my_daemon.brokers[broker_link_uuid]))) for brok in my_scheduler.my_daemon.brokers[broker_link_uuid].broks: broker_broks_count[my_scheduler.my_daemon.brokers[broker_link_uuid].name] += 1 print("- %s" % brok) # Same list of broks in the two brokers self.assertItemsEqual(my_scheduler.my_daemon.brokers[broker1_link_uuid].broks, my_scheduler.my_daemon.brokers[broker2_link_uuid].broks) # Scheduler HTTP interface sched_interface = SchedulerInterface(my_scheduler.my_daemon) # Test broker-master that gets its broks from the scheduler # Get the scheduler broks to be sent ... 
print("Broks to be sent:") to_send = [b for b in my_scheduler.my_daemon.brokers[broker1_link_uuid].broks if getattr(b, 'sent_to_externals', False)] for brok in to_send: print(("- %s" % (brok))) assert 6 == len(to_send) broks_list = sched_interface._broks('broker-master') broks_list = unserialize(broks_list, True) assert 6 == len(broks_list) assert broker_broks_count['broker-master'] == len(broks_list) # No more broks to get # Get the scheduler broks to be sent ... to_send = [b for b in my_scheduler.my_daemon.brokers[broker1_link_uuid].broks if not getattr(b, 'got', False)] assert 0 == len(to_send), "Still some broks to be sent!" # Test broker-master 2 that gets its broks from the scheduler # Get the scheduler broks to be sent ... to_send = [b for b in my_scheduler.my_daemon.brokers[broker2_link_uuid].broks if getattr(b, 'sent_to_externals', False)] print("Broks to be sent:") for brok in to_send: print(("- %s" % (brok))) assert 6 == len(to_send) broks_list = sched_interface._broks('broker-master2') broks_list = unserialize(broks_list, True) assert 6 == len(broks_list) assert broker_broks_count['broker-master2'] == len(broks_list) # No more broks to get # Get the scheduler broks to be sent ... to_send = [b for b in my_scheduler.my_daemon.brokers[broker2_link_uuid].broks if not getattr(b, 'got', False)] assert 0 == len(to_send), "Still some broks to be sent!" # Test unknown broker that gets its broks from the scheduler broks_list = sched_interface._broks('broker-unknown') broks_list = unserialize(broks_list, True) assert 0 == len(broks_list) # Re-get broks # Test broker-master that gets its broks from the scheduler broks_list = sched_interface._broks('broker-master') broks_list = unserialize(broks_list, True) # No broks ! assert 0 == len(broks_list) # Test broker-master 2 that gets its broks from the scheduler broks_list = sched_interface._broks('broker-master2') broks_list = unserialize(broks_list, True) # No broks ! 
assert 0 == len(broks_list) # Some new broks self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) # Same list of broks in the two brokers self.assertItemsEqual(my_scheduler.my_daemon.brokers[broker1_link_uuid].broks, my_scheduler.my_daemon.brokers[broker2_link_uuid].broks) assert len(my_scheduler.my_daemon.brokers[broker1_link_uuid].broks) > 1 assert len(my_scheduler.my_daemon.brokers[broker2_link_uuid].broks) > 1 def test_multibroker_multisched(self): """ Test with 2 brokers and 2 schedulers :return: None """ self.setup_with_file('cfg/multibroker/cfg_multi_broker_multi_scheduler.cfg', dispatching=True) self.clear_logs() assert 2 == len(self.schedulers) my_first_scheduler = self._schedulers['scheduler-master'] my_second_scheduler = self._schedulers['scheduler-master2'] print(("Sched #1 %d hosts: %s" % (len(my_first_scheduler.hosts), my_first_scheduler.hosts))) print(("Sched #2 %d hosts: %s" % (len(my_second_scheduler.hosts), my_second_scheduler.hosts))) # if len(my_first_scheduler.hosts) == 1: my_first_scheduler = self._schedulers['scheduler-master2'] my_second_scheduler = self._schedulers['scheduler-master'] # Two brokers in first scheduler print(("Sched #1 brokers: %s" % my_first_scheduler.my_daemon.brokers)) assert 2 == len(my_first_scheduler.my_daemon.brokers) sched1_first_broker = None for broker_uuid in my_first_scheduler.my_daemon.brokers: broker = my_first_scheduler.my_daemon.brokers[broker_uuid] if broker.name == 'broker-master': sched1_first_broker = broker break else: assert False, "Scheduler 1 - No broker master link!" sched1_second_broker = None for broker_uuid in my_second_scheduler.my_daemon.brokers: broker = my_second_scheduler.my_daemon.brokers[broker_uuid] if broker.name == 'broker-master2': sched1_second_broker = broker break else: assert False, "Scheduler 1 - No broker master 2 link!" 
# Two brokers in second scheduler print(("Sched #2 brokers: %s" % my_second_scheduler.my_daemon.brokers)) assert 2 == len(my_second_scheduler.my_daemon.brokers) sched2_first_broker = None for broker_uuid in my_second_scheduler.my_daemon.brokers: broker = my_second_scheduler.my_daemon.brokers[broker_uuid] if broker.name == 'broker-master': sched2_first_broker = broker break else: assert False, "Scheduler 2 - No broker master link!" sched2_second_broker = None for broker_uuid in my_second_scheduler.my_daemon.brokers: broker = my_second_scheduler.my_daemon.brokers[broker_uuid] if broker.name == 'broker-master2': sched2_second_broker = broker break else: assert False, "Scheduler 2 - No broker master 2 link!" # --- # Find hosts and services in my schedulers host1 = my_first_scheduler.hosts.find_by_name("test_host_0") host1.checks_in_progress = [] host1.act_depend_of = [] # ignore the router svc1 = my_first_scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc1.checks_in_progress = [] svc1.act_depend_of = [] # no hostchecks on critical checkresults host2 = my_second_scheduler.hosts.find_by_name("test_host_1") host2.checks_in_progress = [] # --- # Create broks in the first scheduler self.scheduler_loop( 1, [[host1, 0, 'UP'], [svc1, 0, 'OK']], my_first_scheduler) time.sleep(0.1) # --- # Check raised broks in the first scheduler brokers # 6 broks: new_conf, host_next_schedule (router), host_next_schedule (host), # service_next_schedule, host_check_result, service_check_result ref_broks_count = 6 # Count broks in each broker broker_broks_count = {} for broker_link_uuid in my_first_scheduler.my_daemon.brokers: broker_broks_count[broker_link_uuid] = 0 print(("Broker %s:" % (my_first_scheduler.my_daemon.brokers[broker_link_uuid]))) for brok in my_first_scheduler.my_daemon.brokers[broker_link_uuid].broks: broker_broks_count[broker_link_uuid] += 1 print("- %s" % brok) for broker_link_uuid in my_first_scheduler.my_daemon.brokers: assert 
broker_broks_count[broker_link_uuid] == ref_broks_count # --- # Create broks in the second scheduler self.scheduler_loop(1, [[host2, 0, 'UP']], my_second_scheduler) time.sleep(0.1) # --- # Check raised broks in the second scheduler brokers # 6 broks: new_conf, host_next_schedule (host), host_check_result ref_broks_count = 3 # Count broks in each broker broker_broks_count = {} for broker_link_uuid in my_second_scheduler.my_daemon.brokers: broker_broks_count[broker_link_uuid] = 0 print(("Broker %s:" % (my_second_scheduler.my_daemon.brokers[broker_link_uuid]))) for brok in my_second_scheduler.my_daemon.brokers[broker_link_uuid].broks: broker_broks_count[broker_link_uuid] += 1 print("- %s" % brok) for broker_link_uuid in my_second_scheduler.my_daemon.brokers: assert broker_broks_count[broker_link_uuid] == ref_broks_count @pytest.mark.skip("Temporary disabled...") def test_multibroker_multisched_realms(self): """ Test with realms / sub-realms All + sub (north + south): * broker-master * poller-masterAll All: * scheduler-master * poller-master North: * scheduler-masterN * broker-masterN South: * scheduler-masterS :return: None """ self.setup_with_file('cfg/multibroker/cfg_multi_broker_multi_sched_realms.cfg', 'cfg/multibroker/alignak-multi_broker_multi_sched_realms.ini', dispatching=True) # test right brokers sent to right schedulers smaster = self._schedulers['scheduler-master'] smaster_n = self._schedulers['scheduler-masterN'] smaster_s = self._schedulers['scheduler-masterS'] # Brokers of each scheduler for broker_link_uuid in smaster.my_daemon.brokers: assert smaster.my_daemon.brokers[broker_link_uuid].name == 'broker-master' assert 1 == len(smaster.my_daemon.brokers) for broker_link_uuid in smaster_s.my_daemon.brokers: assert smaster_s.my_daemon.brokers[broker_link_uuid].name == 'broker-master' assert 1 == len(smaster_s.my_daemon.brokers) for broker_link_uuid in smaster_n.my_daemon.brokers: assert smaster_n.my_daemon.brokers[broker_link_uuid].name in 
['broker-master', 'broker-masterN'] assert 2 == len(smaster_n.my_daemon.brokers) brokermaster = None for sat in self._arbiter.dispatcher.satellites: if getattr(sat, 'broker_name', '') == 'broker-master': brokermaster = sat
class TestMultibroker(AlignakTest): ''' This class test the multibroker in schedulers ''' def setUp(self): pass def test_multibroker_onesched(self): ''' Test with 2 brokers and 1 scheduler :return: None ''' pass def test_multibroker_multisched(self): ''' Test with 2 brokers and 2 schedulers :return: None ''' pass @pytest.mark.skip("Temporary disabled...") def test_multibroker_multisched_realms(self): ''' Test with realms / sub-realms All + sub (north + south): * broker-master * poller-masterAll All: * scheduler-master * poller-master North: * scheduler-masterN * broker-masterN South: * scheduler-masterS :return: None ''' pass
6
4
70
12
43
17
7
0.41
1
2
1
0
4
0
4
59
286
50
173
38
167
71
160
37
155
16
2
2
29
3,920
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests/test_statsd.py
tests.test_statsd.TestCarbon
class TestCarbon(AlignakTest): """ This class test the Graphite interface """ def setUp(self): super(TestCarbon, self).setUp() # Log at DEBUG level self.set_unit_tests_logger_level() self.clear_logs() # Create our own stats manager... # do not use the global object to restart with a fresh one on each test self.statsmgr = Stats() self.fake_carbon = FakeCarbonServer(port=2003) def tearDown(self): self.fake_carbon.stop() self.fake_carbon.join() def test_statsmgr(self): """ Stats manager exists :return: """ assert 'statsmgr' in globals() def test_statsmgr_register_disabled(self): """ Stats manager is registered as disabled :return: """ # Register stats manager as disabled assert not self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=False) assert self.statsmgr.statsd_enabled is False assert self.statsmgr.broks_enabled is False assert self.statsmgr.statsd_sock is None assert self.statsmgr.metrics_count == 0 def test_statsmgr_register_disabled_broks(self): """ Stats manager is registered as disabled, but broks are enabled :return: """ # Register stats manager as disabled assert not self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=False, broks_enabled=True) assert self.statsmgr.statsd_enabled is False assert self.statsmgr.broks_enabled is True assert self.statsmgr.statsd_sock is None assert self.statsmgr.statsd_addr is None assert self.statsmgr.metrics_count == 0 def test_statsmgr_register_enabled(self): """ Stats manager is registered as enabled :return: """ # Register stats manager as enabled assert self.statsmgr.statsd_sock is None assert self.statsmgr.statsd_addr is None assert self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=True) assert self.statsmgr.statsd_enabled is True assert self.statsmgr.broks_enabled is False assert self.statsmgr.carbon is not None assert self.statsmgr.metrics_count == 0 index 
= 0 self.assert_log_match(re.escape( 'Graphite/carbon configuration for arbiter-master - localhost:2003, ' 'prefix: alignak, enabled: True, broks: False, file: None' ), index) index += 1 self.assert_log_match(re.escape( 'Sending arbiter-master statistics to: localhost:2003, prefix: alignak' ), index) def test_statsmgr_register_enabled_broks(self): """ Stats manager is registered as enabled and broks are enabled :return: """ # Register stats manager as enabled assert self.statsmgr.statsd_sock is None assert self.statsmgr.statsd_addr is None assert self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=True, broks_enabled=True) assert self.statsmgr.statsd_enabled is True assert self.statsmgr.broks_enabled is True assert self.statsmgr.carbon is not None assert self.statsmgr.metrics_count == 0 index = 0 self.assert_log_match(re.escape( 'Graphite/carbon configuration for arbiter-master - localhost:2003, ' 'prefix: alignak, enabled: True, broks: True, file: None' ), index) index += 1 self.assert_log_match(re.escape( 'Sending arbiter-master statistics to: localhost:2003, prefix: alignak' ), index) def test_statsmgr_connect(self): """ Test connection in disabled mode :return: """ # Register stats manager as disabled assert not self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=False) # Connect to StatsD server assert self.statsmgr.statsd_sock is None assert self.statsmgr.statsd_addr is None # This method is not usually called directly, but it must refuse the connection # if it not enabled assert not self.statsmgr.load_statsd() assert self.statsmgr.statsd_sock is None assert self.statsmgr.statsd_addr is None assert self.statsmgr.metrics_count == 0 def test_statsmgr_connect_port_error(self): """ Test connection with a bad port :return: """ # Register stats manager as enabled (another port than the default one) assert self.statsmgr.connect('arbiter-master', 'arbiter', 
host='localhost', port=8888, prefix='alignak', enabled=True) index = 0 self.assert_log_match(re.escape( 'Graphite/carbon configuration for arbiter-master - localhost:8888, ' 'prefix: alignak, enabled: True, broks: False, file: None' ), index) index += 1 self.assert_log_match(re.escape( 'Sending arbiter-master statistics to: localhost:8888, prefix: alignak' ), index) index += 1 self.assert_log_match(re.escape( 'Statistics for arbiter-master will be sent to localhost:8888.' ), index) index += 1 def test_statsmgr_timer(self): """ Test sending data for a timer :return: """ # Register stats manager as enabled self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=True, broks_enabled=True) assert self.statsmgr.metrics_count == 0 index = 0 self.assert_log_match(re.escape( 'Graphite/carbon configuration for arbiter-master - localhost:2003, ' 'prefix: alignak, enabled: True, broks: True, file: None' ), index) index += 1 self.assert_log_match(re.escape( 'Sending arbiter-master statistics to: localhost:2003, prefix: alignak' ), index) index += 1 self.assert_log_match(re.escape( 'Statistics for arbiter-master will be sent to localhost:2003.' ), index) index += 1 assert self.statsmgr.stats == {} # Create a metric statistic brok = self.statsmgr.timer('test', 0) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 1 # Get min, max, count and sum assert self.statsmgr.stats['test'] == (0, 0, 1, 0) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:0|ms' # ), 3) # Prepare brok and remove specific brok properties (for test purpose only... 
brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'timer', 'metric': 'alignak.arbiter-master.test', 'value': 0, 'uom': 'ms' }} # Increment brok = self.statsmgr.timer('test', 1) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 2 # Get min, max, count (incremented) and sum assert self.statsmgr.stats['test'] == (0, 1, 2, 1) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:1000|ms' # ), 4) # Prepare brok and remove specific brok properties (for test purpose only... brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'timer', 'metric': 'alignak.arbiter-master.test', 'value': 1000, 'uom': 'ms' }} # Increment - the function is called 'incr' but it does not increment, it sets the value! brok = self.statsmgr.timer('test', 12) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 3 # Get min, max, count (incremented) and sum (increased) assert self.statsmgr.stats['test'] == (0, 12, 3, 13) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:1000|ms' # ), 5) # Prepare brok and remove specific brok properties (for test purpose only... 
brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'timer', 'metric': 'alignak.arbiter-master.test', 'value': 12000, 'uom': 'ms' }} def test_statsmgr_counter(self): """ Test sending data for a counter :return: """ # Register stats manager as enabled self.statsmgr.connect('broker-master', 'broker', host='localhost', port=2003, prefix='alignak', enabled=True, broks_enabled=True) index = 0 self.assert_log_match(re.escape( 'Graphite/carbon configuration for broker-master - localhost:2003, ' 'prefix: alignak, enabled: True, broks: True, file: None' ), index) index += 1 self.assert_log_match(re.escape( 'Sending broker-master statistics to: localhost:2003, prefix: alignak' ), index) index += 1 self.assert_log_match(re.escape( 'Statistics for broker-master will be sent to localhost:2003.' ), index) index += 1 assert self.statsmgr.stats == {} # Create a metric statistic brok = self.statsmgr.counter('test', 0) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 1 # Get min, max, count and sum assert self.statsmgr.stats['test'] == (0, 0, 1, 0) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:0|ms' # ), 3) # Prepare brok and remove specific brok properties (for test purpose only... 
brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'counter', 'metric': 'alignak.broker-master.test', 'value': 0, 'uom': 'c' }} # Increment brok = self.statsmgr.counter('test', 1) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 2 # Get min, max, count (incremented) and sum assert self.statsmgr.stats['test'] == (0, 1, 2, 1) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:1000|ms' # ), 4) # Prepare brok and remove specific brok properties (for test purpose only... brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'counter', 'metric': 'alignak.broker-master.test', 'value': 1, 'uom': 'c' }} # Increment - the function is called 'incr' but it does not increment, it sets the value! brok = self.statsmgr.counter('test', 12) assert len(self.statsmgr.stats) == 1 # Get min, max, count (incremented) and sum (increased) assert self.statsmgr.stats['test'] == (0, 12, 3, 13) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:1000|ms' # ), 5) # Prepare brok and remove specific brok properties (for test purpose only... 
brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'counter', 'metric': 'alignak.broker-master.test', 'value': 12, 'uom': 'c' }} def test_statsmgr_gauge(self): """ Test sending data for a gauge :return: """ # Register stats manager as enabled self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=True, broks_enabled=True) index = 0 self.assert_log_match(re.escape( 'Graphite/carbon configuration for arbiter-master - localhost:2003, ' 'prefix: alignak, enabled: True, broks: True, file: None' ), index) index += 1 self.assert_log_match(re.escape( 'Sending arbiter-master statistics to: localhost:2003, prefix: alignak' ), index) index += 1 self.assert_log_match(re.escape( 'Statistics for arbiter-master will be sent to localhost:2003.' ), index) index += 1 assert self.statsmgr.stats == {} # Create a metric statistic brok = self.statsmgr.gauge('test', 0) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 1 # Get min, max, count and sum assert self.statsmgr.stats['test'] == (0, 0, 1, 0) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:0|ms' # ), 3) # Prepare brok and remove specific brok properties (for test purpose only... 
brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'gauge', 'metric': 'alignak.arbiter-master.test', 'value': 0, 'uom': 'g' }} # Increment brok = self.statsmgr.gauge('test', 1) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 2 # Get min, max, count (incremented) and sum assert self.statsmgr.stats['test'] == (0, 1, 2, 1) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:1000|ms' # ), 4) # Prepare brok and remove specific brok properties (for test purpose only... brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'gauge', 'metric': 'alignak.arbiter-master.test', 'value': 1, 'uom': 'g' }} # Increment - the function is called 'incr' but it does not increment, it sets the value! brok = self.statsmgr.gauge('test', 12) assert len(self.statsmgr.stats) == 1 # One more inner metric assert self.statsmgr.metrics_count == 3 # Get min, max, count (incremented) and sum (increased) assert self.statsmgr.stats['test'] == (0, 12, 3, 13) # self.assert_log_match(re.escape( # 'Sending data: alignak.arbiter-master.test:1000|ms' # ), 5) # Prepare brok and remove specific brok properties (for test purpose only... 
brok.prepare() brok.__dict__.pop('creation_time') brok.__dict__.pop('instance_id') brok.__dict__.pop('prepared') brok.__dict__.pop('uuid') brok.__dict__['data'].pop('ts') assert brok.__dict__ == {'type': 'alignak_stat', 'data': { 'type': 'gauge', 'metric': 'alignak.arbiter-master.test', 'value': 12, 'uom': 'g' }} def test_statsmgr_flush(self): """ Test sending several data at once to a Graphite server The stats manager do not send the metrics when it is configured for Graphite. It is needed to call the flush method periodically to send the stored metrics. :return: """ # Register stats manager as enabled self.statsmgr.connect('arbiter-master', 'arbiter', host='localhost', port=2003, prefix='alignak', enabled=True) assert self.statsmgr.metrics_count == 0 assert self.statsmgr.stats == {} self.clear_logs() # Flush but no metrics exist assert self.statsmgr.flush() self.clear_logs() # Create a timer metric self.statsmgr.timer('my_timer', 0) self.statsmgr.timer('my_timer', 1) self.statsmgr.timer('my_timer', 12) self.statsmgr.counter('my_counter', 3) self.statsmgr.gauge('my_gauge', 125) # 5 metrics stored assert self.statsmgr.metrics_count == 5 assert self.statsmgr.flush()
class TestCarbon(AlignakTest): ''' This class test the Graphite interface ''' def setUp(self): pass def tearDown(self): pass def test_statsmgr(self): ''' Stats manager exists :return: ''' pass def test_statsmgr_register_disabled(self): ''' Stats manager is registered as disabled :return: ''' pass def test_statsmgr_register_disabled_broks(self): ''' Stats manager is registered as disabled, but broks are enabled :return: ''' pass def test_statsmgr_register_enabled(self): ''' Stats manager is registered as enabled :return: ''' pass def test_statsmgr_register_enabled_broks(self): ''' Stats manager is registered as enabled and broks are enabled :return: ''' pass def test_statsmgr_connect(self): ''' Test connection in disabled mode :return: ''' pass def test_statsmgr_connect_port_error(self): ''' Test connection with a bad port :return: ''' pass def test_statsmgr_timer(self): ''' Test sending data for a timer :return: ''' pass def test_statsmgr_counter(self): ''' Test sending data for a counter :return: ''' pass def test_statsmgr_gauge(self): ''' Test sending data for a gauge :return: ''' pass def test_statsmgr_flush(self): ''' Test sending several data at once to a Graphite server The stats manager do not send the metrics when it is configured for Graphite. It is needed to call the flush method periodically to send the stored metrics. :return: ''' pass
14
12
36
3
24
9
1
0.38
1
3
2
0
13
2
13
68
478
47
312
25
298
119
209
25
195
1
2
0
13
3,921
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests_integ/test_daemons_api.py
tests_integ.test_daemons_api.TestDaemonsApi
class TestDaemonsApi(AlignakTest): """Test the daemons HTTP API""" def setUp(self): # Set an environment variable to change the default period of activity log (every 60 loops) os.environ['ALIGNAK_LOG_ACTIVITY'] = '1' # Set an environment variable to activate the logging of system cpu, memory and disk os.environ['ALIGNAK_DAEMON_MONITORING'] = '2' # Set an environment variable to activate the logging of checks execution # With this the pollers/schedulers will raise INFO logs about the checks execution os.environ['ALIGNAK_LOG_ACTIONS'] = 'WARNING' super(TestDaemonsApi, self).setUp() def tearDown(self): del os.environ['ALIGNAK_LOG_ACTIVITY'] del os.environ['ALIGNAK_DAEMON_MONITORING'] del os.environ['ALIGNAK_LOG_ACTIONS'] print("Test terminated!") def _prepare_my_configuration(self, daemons_list=None, remove_daemons=None, cfg_dir=None, realms=None): self.cfg_folder = '/tmp/alignak' if os.path.exists(self.cfg_folder): shutil.rmtree(self.cfg_folder) if realms is None: realms = ['All'] if cfg_dir is None: cfg_dir = 'default_many_hosts' hosts_count = 10 if remove_daemons is None: remove_daemons = [] if daemons_list is None: daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master'] # Default shipped configuration preparation self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder) # Specific daemon load configuration preparation if os.path.exists(os.path.join(self._test_dir, './cfg/%s/alignak.cfg' % cfg_dir)): shutil.copy(os.path.join(self._test_dir, './cfg/%s/alignak.cfg' % cfg_dir), '%s/etc' % self.cfg_folder) if os.path.exists(os.path.join(self._test_dir, '%s/etc/arbiter' % self.cfg_folder)): shutil.rmtree('%s/etc/arbiter' % self.cfg_folder) shutil.copytree(os.path.join(self._test_dir, './cfg/%s/arbiter' % cfg_dir), '%s/etc/arbiter' % self.cfg_folder) self._prepare_hosts_configuration(cfg_folder='%s/etc/arbiter/objects/hosts' % self.cfg_folder, hosts_count=hosts_count, target_file_name='hosts.cfg', 
realms=realms) # Some script commands must be copied in the test folder if os.path.exists(os.path.join(self._test_dir, './libexec/check_command.sh')): shutil.copy(os.path.join(self._test_dir, './libexec/check_command.sh'), '%s/check_command.sh' % self.cfg_folder) # Update the default configuration files files = ['%s/etc/alignak.ini' % self.cfg_folder] try: cfg = configparser.ConfigParser() cfg.read(files) # Directory for running daemons cfg.set('alignak-configuration', 'daemons_script_location', '') cfg.set('alignak-configuration', 'launch_missing_daemons', '1') cfg.set('alignak-configuration', 'daemons_check_period', '5') cfg.set('alignak-configuration', 'daemons_stop_timeout', '3') cfg.set('alignak-configuration', 'daemons_start_timeout', '5') cfg.set('alignak-configuration', 'daemons_new_conf_timeout', '5') cfg.set('alignak-configuration', 'daemons_dispatch_timeout', '5') cfg.set('alignak-configuration', 'min_workers', '1') cfg.set('alignak-configuration', 'max_workers', '1') cfg.set('alignak-configuration', 'log_cherrypy', '1') # A macro for the check script directory cfg.set('alignak-configuration', '_EXEC_DIR', self.cfg_folder) for daemon in daemons_list: if cfg.has_section('daemon.%s' % daemon): cfg.set('daemon.%s' % daemon, 'alignak_launched', '1') cfg.set('daemon.%s' % daemon, 'min_workers', '1') cfg.set('daemon.%s' % daemon, 'max_workers', '1') for daemon in remove_daemons: if cfg.has_section('daemon.%s' % daemon): print("Remove daemon: %s" % daemon) cfg.remove_section('daemon.%s' % daemon) if os.path.exists('%s/etc/alignak.d' % self.cfg_folder): # Remove default or former modules/daemons configuration! 
print("- removing %s/etc/alignak.d" % self.cfg_folder) shutil.rmtree('%s/etc/alignak.d' % self.cfg_folder) with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified: cfg.write(modified) except Exception as exp: print("* parsing error in config file: %s" % exp) assert False def test_daemons_api_no_ssl(self): """ Running all the Alignak daemons - no SSL :return: """ self._prepare_my_configuration() self._run_daemons_and_test_api(ssl=False) @pytest.mark.skip("See #986 - SSL is broken with test files!") def test_daemons_api_ssl(self): """ Running all the Alignak daemons - with SSL :return: None """ # disable ssl warning # requests.packages.urllib3.disable_warnings() self._prepare_my_configuration() # Update the default configuration files files = ['%s/etc/alignak.ini' % self.cfg_folder] try: cfg = configparser.ConfigParser() cfg.read(files) cfg.set('alignak-configuration', 'use_ssl', '1') cfg.set('alignak-configuration', 'server_cert', '%s/etc/certs/certificate_test.csr' % self.cfg_folder) cfg.set('alignak-configuration', 'server_key', '%s/etc/certs/certificate_test.key' % self.cfg_folder) # cfg.set('alignak-configuration', 'ca_cert', '%s/etc/certs/dhparams.pem' % self.cfg_folder) with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified: cfg.write(modified) except Exception as exp: print("* parsing error in config file: %s" % exp) assert False self._run_daemons_and_test_api(ssl=True) def _run_daemons_and_test_api(self, ssl=False): """ Running all the Alignak daemons to check their correct launch and API responses This test concerns only the main API features ... 
:return: """ satellite_map = { 'arbiter': '7770', 'scheduler': '7768', 'broker': '7772', 'poller': '7771', 'reactionner': '7769', 'receiver': '7773' } daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master'] self._run_alignak_daemons(cfg_folder=self.cfg_folder, daemons_list=daemons_list, runtime=5) scheme = 'http' if ssl: scheme = 'https' req = requests.Session() # ----- print("Testing api...") # name_to_interface = {'arbiter': ArbiterInterface, # 'scheduler': SchedulerInterface, # 'broker': BrokerInterface, # 'poller': GenericInterface, # 'reactionner': GenericInterface, # 'receiver': GenericInterface} doc = [] doc.append(".. _alignak_features/daemons_api:") doc.append("") doc.append(".. Built from the test_daemons_api.py unit test last run!") doc.append("") doc.append("===================") doc.append("Alignak daemons API") doc.append("===================") doc.append("") for name in sorted(satellite_map): port = satellite_map[name] print("%s, getting api..." % name) raw_data = req.get("%s://localhost:%s/api" % (scheme, port), verify=False) print("%s, api: %s" % (name, raw_data.text)) assert raw_data.status_code == 200 data = raw_data.json() print("API data: %s" % data) assert 'doc' in data assert 'api' in data doc.append("Daemon type: %s" % name) doc.append("-" * len("Daemon type: %s" % name)) for endpoint in data['api']: assert 'name' in endpoint assert 'doc' in endpoint assert 'args' in endpoint doc.append("/%s" % endpoint['name']) doc.append("~" * len("/%s" % endpoint['name'])) doc.append("") doc.append("Python source code documentation\n ::\n") doc.append(" %s" % endpoint['doc']) doc.append("") # expected_data = set(name_to_interface[name](None).api()) # assert set(data) == expected_data, "Daemon %s has a bad API!" 
% name # print('\n'.join(doc)) rst_write = None rst_file = "daemons_api.rst" if os.path.exists("../doc/source/api"): rst_write = "../doc/source/api/%s" % rst_file if os.path.exists("../../alignak-doc/source/07_alignak_features/api"): rst_write = "../../alignak-doc/source/07_alignak_features/api/%s" % rst_file if rst_write: # with open(rst_write, mode='wt', encoding='utf-8') as out: with open(rst_write, mode='wt') as out: out.write('\n'.join(doc)) # ----- # ----- print("Testing identity") for name, port in list(satellite_map.items()): raw_data = req.get("%s://localhost:%s/identity" % (scheme, port), verify=False) assert raw_data.status_code == 200 data = raw_data.json() assert "running_id" in data print("%s, my running id: %s" % (name, data['running_id'])) # Same as get_id assert 'alignak' in data assert 'type' in data assert 'name' in data assert 'version' in data # + assert 'start_time' in data # + assert 'running_id' in data # ----- # ----- print("Testing satellites_list") # Arbiter only raw_data = req.get("%s://localhost:%s/satellites_list" % (scheme, satellite_map['arbiter']), verify=False) assert raw_data.status_code == 200 expected_data = {"reactionner": ["reactionner-master"], "broker": ["broker-master"], "arbiter": ["arbiter-master"], "scheduler": ["scheduler-master"], "receiver": ["receiver-master"], "poller": ["poller-master"]} data = raw_data.json() print("Satellites: %s" % json.dumps(data)) assert isinstance(data, dict), "Data is not a dict!" 
for k, v in expected_data.items(): assert set(data[k]) == set(v) # ----- # ----- print("Testing alignak status") # Arbiter only raw_data = req.get("%s://localhost:%s/status" % (scheme, satellite_map['arbiter']), verify=False) assert raw_data.status_code == 200 data = raw_data.json() print("Overall status: %s" % json.dumps(data)) assert "template" in data assert "livestate" in data assert "services" in data assert isinstance(data['services'], list) for service in data['services']: assert "name" in service assert service['name'] in ['arbiter-master', 'broker-master', 'poller-master', 'scheduler-master', 'reactionner-master', 'receiver-master'] assert "livestate" in service livestate = service['livestate'] assert "timestamp" in livestate assert "state" in livestate assert "output" in livestate assert "long_output" in livestate assert "perf_data" in livestate doc = list() doc.append(".. _alignak_features/alignak_status:") doc.append(".. Built from the test_daemons_api.py unit test last run!") doc.append("") doc.append("======================") doc.append("Alignak overall status") doc.append("======================") doc.append("An Alignak overall status example:") doc.append("") doc.append("::") doc.append("") doc.append(" %s" % json.dumps(data, sort_keys=True, indent=4)) doc.append("") rst_write = None rst_file = "alignak_status.rst" if os.path.exists("../doc/source/api"): rst_write = "../doc/source/api/%s" % rst_file if os.path.exists("../../alignak-doc/source/07_alignak_features/api"): rst_write = "../../alignak-doc/source/07_alignak_features/api/%s" % rst_file if rst_write: with open(rst_write, mode='wt') as out: out.write('\n'.join(doc)) # ----- # ----- print("Testing stats") doc = [] doc.append(".. _alignak_features/daemons_stats:") doc.append(".. 
Built from the test_daemons_api.py unit test last run!") doc.append("") doc.append("==========================") doc.append("Alignak daemons statistics") doc.append("==========================") for name, port in list(satellite_map.items()): print("- for %s" % name) raw_data = req.get("%s://localhost:%s/stats" % (scheme, port), verify=False) print("Got /stats: %s" % raw_data.content) assert raw_data.status_code == 200 data = raw_data.json() print("%s, my stats: %s" % (name, json.dumps(data))) doc.append("") doc.append("Daemon type: %s" % name) doc.append("-" * len("Daemon type: %s" % name)) doc.append("") doc.append("A %s daemon statistics example:\n ::\n" % name) doc.append(" %s" % json.dumps(data, sort_keys=True, indent=4)) doc.append("") # Same as start_time assert 'alignak' in data assert 'type' in data assert 'name' in data assert 'version' in data assert 'start_time' in data # + assert "program_start" in data # assert "load" in data assert "metrics" in data # To be deprecated... assert "modules" in data assert "counters" in data if name in ['arbiter']: assert "livestate" in data livestate = data['livestate'] print("Livestate: %s" % livestate) assert "timestamp" in livestate assert "state" in livestate assert "output" in livestate assert "daemons" in livestate for daemon_state in livestate['daemons']: assert livestate['daemons'][daemon_state] == 0 # assert daemon_state in satellite_map.keys() assert "daemons_states" in data daemons_state = data['daemons_states'] for daemon_name in daemons_state: daemon_state = daemons_state[daemon_name] assert "type" in daemon_state assert "name" in daemon_state assert "realm_name" in daemon_state assert "manage_sub_realms" in daemon_state assert "uri" in daemon_state assert "alive" in daemon_state assert "passive" in daemon_state assert "reachable" in daemon_state assert "active" in daemon_state assert "spare" in daemon_state assert "polling_interval" in daemon_state assert "configuration_sent" in daemon_state assert 
"max_check_attempts" in daemon_state assert "last_check" in daemon_state assert "livestate" in daemon_state assert "livestate_output" in daemon_state rst_write = None rst_file = "daemons_stats.rst" if os.path.exists("../doc/source/api"): rst_write = "../doc/source/api/%s" % rst_file if os.path.exists("../../alignak-doc/source/07_alignak_features/api"): rst_write = "../../alignak-doc/source/07_alignak_features/api/%s" % rst_file if rst_write: with open(rst_write, mode='wt') as out: out.write('\n'.join(doc)) print("Testing stats (detailed)") for name, port in list(satellite_map.items()): print("- for %s" % name) raw_data = req.get("%s://localhost:%s/stats?details=1" % (scheme, port), verify=False) print(" /stats?details=1: %s" % raw_data.content) assert raw_data.status_code == 200 # print("%s, my stats: %s" % (name, raw_data.text)) data = raw_data.json() print(" (detailed): %s" % (json.dumps(data))) # Too complex to check all this stuff # expected = { # "alignak": "My Alignak", "type": "arbiter", "name": "Default-arbiter", # "version": "1.0.0", # "metrics": [ # "arbiter.Default-arbiter.external-commands.queue 0 1514205096" # ], # # "modules": {"internal": {}, "external": {}}, # # "monitoring_objects": { # "servicesextinfo": {"count": 0}, # "businessimpactmodulations": {"count": 0}, # "hostgroups": {"count": 0}, # "escalations": {"count": 0}, # "schedulers": {"count": 1}, # "hostsextinfo": {"count": 0}, # "contacts": {"count": 0}, # "servicedependencies": {"count": 0}, # "resultmodulations": {"count": 0}, # "servicegroups": {"count": 0}, # "pollers": {"count": 1}, # "arbiters": {"count": 1}, # "receivers": {"count": 1}, # "macromodulations": {"count": 0}, # "reactionners": {"count": 1}, # "contactgroups": {"count": 0}, # "brokers": {"count": 1}, # "realms": {"count": 1}, # "services": {"count": 0}, # "commands": {"count": 4}, # "notificationways": {"count": 0}, # "timeperiods": {"count": 1}, # "modules": {"count": 0}, # "checkmodulations": {"count": 0}, # "hosts": 
{"count": 2}, # "hostdependencies": {"count": 0} # } # } # assert expected == data, "Data is not an unicode!" # ----- # ----- # print("Testing wait_new_conf") # # Except Arbiter (not spare) # for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']: # raw_data = req.get("%s://localhost:%s/wait_new_conf" % (http, satellite_map[daemon]), verify=False) # data = raw_data.json() # assert data == None # ----- # ----- print("Testing have_conf") # Except Arbiter (not spare) for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']: raw_data = req.get("%s://localhost:%s/_have_conf" % (scheme, satellite_map[daemon]), verify=False) assert raw_data.status_code == 200 data = raw_data.json() print("%s, have_conf: %s" % (daemon, data)) assert data == True, "Daemon %s should have a conf!" % daemon # raw_data = req.get("%s://localhost:%s/have_conf?magic_hash=1234567890" % (http, satellite_map[daemon]), verify=False) # data = raw_data.json() # print("%s, have_conf: %s" % (daemon, data)) # assert data == False, "Daemon %s should not accept the magic hash!" % daemon # ----- # ----- print("Testing do_not_run") # Arbiter only raw_data = req.get("%s://localhost:%s/_do_not_run" % (scheme, satellite_map['arbiter']), verify=False) assert raw_data.status_code == 200 data = raw_data.json() print("%s, do_not_run: %s" % (name, data)) # Arbiter master returns False, spare returns True assert data == {'_message': 'Received message to not run. 
I am the Master arbiter, ' 'ignore and continue to run.', '_status': 'ERR'} # ----- # ----- # print("Testing get_checks on scheduler") # TODO: if have poller running, the poller will get the checks before us # # We need to sleep 10s to be sure the first check can be launched now (check_interval = 5) # sleep(4) # raw_data = req.get("http://localhost:%s/get_checks" % satellite_map['scheduler'], params={'do_checks': True}) # data = unserialize(raw_data.json(), True) # self.assertIsInstance(data, list, "Data is not a list!") # self.assertNotEqual(len(data), 0, "List is empty!") # for elem in data: # self.assertIsInstance(elem, Check, "One elem of the list is not a Check!") # ----- print("Testing managed_configurations") for name, port in list(satellite_map.items()): print("%s, what I manage?" % name) raw_data = req.get( "%s://localhost:%s/managed_configurations" % (scheme, port), verify=False) assert raw_data.status_code == 200 data = raw_data.json() print("%s, what I manage: %s" % (name, data)) assert isinstance(data, dict), "Data is not a dict!" if name != 'arbiter': assert 1 == len(data), "The dict must have 1 key/value!" for sat_id in data: assert 'hash' in data[sat_id] assert 'push_flavor' in data[sat_id] assert 'managed_conf_id' in data[sat_id] else: assert 0 == len(data), "The dict must be empty!" # ----- # ----- print("Testing _external_commands") for name, port in list(satellite_map.items()): raw_data = req.get( "%s://localhost:%s/_external_commands" % (scheme, port), verify=False) assert raw_data.status_code == 200 print("%s _external_commands, got (raw): %s" % (name, raw_data.content)) data = raw_data.json() assert isinstance(data, list), "Data is not a list!" 
# ----- # ----- # Log level print("Testing get_log_level") for name, port in list(satellite_map.items()): raw_data = req.get("%s://localhost:%s/get_log_level" % (scheme, port), verify=False) print("%s, raw: %s" % (name, raw_data.content)) data = raw_data.json() print("%s, log level: %s" % (name, data)) # Initially forced the ERROR log level (no logger configuration file) assert data['log_level'] == 20 assert data['log_level_name'] == 'INFO' print("Testing set_log_level") for name, port in list(satellite_map.items()): raw_data = req.post("%s://localhost:%s/set_log_level" % (scheme, port), data=json.dumps({'log_level': 'UNKNOWN'}), headers={'Content-Type': 'application/json'}, verify=False) data = raw_data.json() assert data == {"_status": u"ERR", "_message": u"Required log level is not allowed: UNKNOWN"} raw_data = req.post("%s://localhost:%s/set_log_level" % (scheme, port), data=json.dumps({'log_level': 'DEBUG'}), headers={'Content-Type': 'application/json'}, verify=False) print("%s, raw_data: %s" % (name, raw_data.text)) data = raw_data.json() print("%s, log level set as : %s" % (name, data)) assert data['log_level'] == 10 assert data['log_level_name'] == 'DEBUG' print("Testing get_log_level") for name, port in list(satellite_map.items()): # if name in ['arbiter']: # continue raw_data = req.get("%s://localhost:%s/get_log_level" % (scheme, port), verify=False) data = raw_data.json() print("%s, log level: %s" % (name, data)) assert data['log_level'] == 10 print("Resetting log level") for name, port in list(satellite_map.items()): raw_data = req.post("%s://localhost:%s/set_log_level" % (scheme, port), data=json.dumps({'log_level': 'INFO'}), headers={'Content-Type': 'application/json'}, verify=False) print("%s, raw_data: %s" % (name, raw_data.text)) data = raw_data.json() print("%s, log level set as : %s" % (name, data)) assert data['log_level'] == 20 assert data['log_level_name'] == 'INFO' # ----- # ----- print("Testing satellites_configuration") # Arbiter only 
raw_data = req.get("%s://localhost:%s/satellites_configuration" % (scheme, satellite_map['arbiter']), verify=False) assert raw_data.status_code == 200 data = raw_data.json() assert isinstance(data, dict), "Data is not a dict!" for daemon_type in data: daemons = data[daemon_type] print("Got Alignak state for: %ss / %d instances" % (daemon_type, len(daemons))) for daemon in daemons: print(" - %s: %s", daemon['%s_name' % daemon_type], daemon['alive']) print(" - %s: %s", daemon['%s_name' % daemon_type], daemon) assert daemon['alive'] assert 'realms' not in daemon assert 'confs' not in daemon assert 'tags' not in daemon assert 'con' not in daemon assert 'realm_name' in daemon # ----- # ----- print("Testing _initial_broks") # Scheduler only raw_data = req.get("%s://localhost:%s/_initial_broks" % (scheme, satellite_map['scheduler']), params={'broker_name': 'broker-master'}, verify=False) assert raw_data.status_code == 200 print("_initial_broks, raw_data: %s" % (raw_data.text)) data = raw_data.json() assert data == 0, "Data must be 0 - no broks!" # ----- # ----- print("Testing _broks") # All except the arbiter and the broker itself! for name, port in list(satellite_map.items()): if name in ['arbiter', 'broker']: continue raw_data = req.get("%s://localhost:%s/_broks" % (scheme, port), params={'broker_name': 'broker-master'}, verify=False) assert raw_data.status_code == 200 print("%s, get_broks raw_data: %s" % (name, raw_data.text)) data = raw_data.json() print("%s, broks: %s" % (name, data)) assert isinstance(data, list), "Data is not a list!" # ----- # ----- print("Testing _events") # All except the arbiter and the broker itself! 
for name, port in list(satellite_map.items()): if name in ['arbiter', 'broker']: continue raw_data = req.get("%s://localhost:%s/_events" % (scheme, port), verify=False) assert raw_data.status_code == 200 print("%s, get_broks raw_data: %s" % (name, raw_data.text)) data = raw_data.json() print("%s, broks: %s" % (name, data)) assert isinstance(data, list), "Data is not a list!" # ----- # ----- print("Testing get_returns") # get_return requested by a scheduler to a potential passive daemons for name in ['reactionner']: raw_data = req.get("%s://localhost:%s/_results" % (scheme, satellite_map[name]), params={'scheduler_instance_id': 'XxX'}, verify=False) assert raw_data.status_code == 200 print("%s, get_returns raw_data: %s" % (name, raw_data.text)) data = raw_data.json() assert isinstance(data, list), "Data is not a list!" for name in ['poller']: raw_data = req.get("%s://localhost:%s/_results" % (scheme, satellite_map[name]), params={'scheduler_instance_id': 'XxX'}, verify=False) assert raw_data.status_code == 200 data = raw_data.json() assert isinstance(data, list), "Data is not a list!" # ----- # ----- print("Testing signals") daemon_count = 0 for daemon in ['broker', 'poller', 'reactionner', 'receiver', 'scheduler', 'arbiter']: for proc in psutil.process_iter(): if 'alignak' in proc.name() and daemon in proc.name(): # SIGUSR1: memory dump print("%s, send signal SIGUSR1" % name) proc.send_signal(signal.SIGUSR1) time.sleep(1.0) # SIGUSR2: objects dump print("%s, send signal SIGUSR2" % name) proc.send_signal(signal.SIGUSR2) time.sleep(1.0) # SIGHUP: reload configuration # proc.send_signal(signal.SIGHUP) # time.sleep(1.0) # Other signals is considered as a request to stop... daemon_count += 1 # 14 because all the daemons are forked at least once ;) # todo: The test strategy should be updated to send signals only to the concerned daemons! 
# assert daemon_count == 14 # ----- # # This function will only send a SIGTERM to the arbiter daemon # # and this will make all the daemons stop # self._stop_alignak_daemons(arbiter_only=True) # # The arbiter daemon will then request its satellites to stop... # this is the same as the following code: print("Testing stop_request - tell the daemons we will stop soon...") for name, port in satellite_map.items(): if name in ['arbiter']: continue print("%s, you will stop soon" % name) raw_data = req.get("%s://localhost:%s/stop_request?stop_now=0" % (scheme, port), verify=False) data = raw_data.json() assert data is True time.sleep(2) print("Testing stop_request - tell the daemons they must stop now!") for name, port in satellite_map.items(): if name in ['arbiter']: continue print("%s, you should stop now!" % name) raw_data = req.get("%s://localhost:%s/stop_request?stop_now=1" % (scheme, port), verify=False) data = raw_data.json() assert data is True # This function will only send a SIGTERM to the arbiter daemon # and this will make all the daemons stop self._stop_alignak_daemons(arbiter_only=True) def test_daemons_configuration(self): """ Running all the Alignak daemons to check their correct configuration Tests for the configuration dispatch API :return: """ self._prepare_my_configuration() self._run_daemons_and_configure(ssl=False) def test_daemons_configuration_no_receiver(self): """ Running all the Alignak daemons to check their correct configuration Do not include any receiver in the daemons list :return: """ daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'scheduler-master'] self._prepare_my_configuration(daemons_list=daemons_list, remove_daemons=['receiver-master']) self._run_daemons_and_configure(ssl=False, daemons_list=daemons_list) def _run_daemons_and_configure(self, ssl=False, daemons_list=None): """ Running all the Alignak daemons to check their correct launch and API Tests for the configuration dispatch API :return: """ 
full_satellite_map = { 'arbiter': '7770', 'scheduler': '7768', 'broker': '7772', 'poller': '7771', 'reactionner': '7769', 'receiver': '7773' } if daemons_list is None: daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master'] satellite_map = {'arbiter': '7770'} for sat in full_satellite_map: if "%s-master" % sat in daemons_list: satellite_map[sat] = full_satellite_map[sat] print("Satellites map: %s" % satellite_map) self._run_alignak_daemons(cfg_folder=self.cfg_folder, daemons_list=daemons_list, runtime=5, update_configuration=False) scheme = 'http' if ssl: scheme = 'https' req = requests.Session() # Here the daemons got started by the arbiter and the arbiter dispatched a configuration # We will ask to wait for a new configuration # ----- # 1/ get the running identifier (confirm the daemon is running) print("--- get_running_id") for name, port in list(satellite_map.items()): raw_data = req.get("%s://localhost:%s/identity" % (scheme, port)) print("Got (raw): %s" % raw_data) data = raw_data.json() assert "running_id" in data print("%s, my running id: %s" % (name, data['running_id'])) # ----- # ----- # 2/ ask if have a configuration - must have one! print("--- have_conf") # Except Arbiter (not spare) for name, port in list(satellite_map.items()): if name == 'arbiter-master': continue raw_data = req.get("%s://localhost:%s/_have_conf" % (scheme, port)) print("have_conf %s, got (raw): %s" % (name, raw_data)) data = raw_data.json() print("%s, have_conf: %s" % (name, data)) assert data is True, "Daemon %s should have a conf!" 
% name # ----- # 3/ ask to wait for a new configuration print("--- wait_new_conf") for name, port in list(satellite_map.items()): if name == 'arbiter-master': continue raw_data = req.get( "%s://localhost:%s/_wait_new_conf" % (scheme, port)) print("wait_new_conf %s, got (raw): %s" % (name, raw_data)) data = raw_data.json() assert data is None # ----- # ----- # 4/ ask if have a configuration - must not have print("--- have_conf") # Except Arbiter (not spare) for name, port in list(satellite_map.items()): if name == 'arbiter-master': continue raw_data = req.get("%s://localhost:%s/_have_conf" % (scheme, port)) print("have_conf %s, got (raw): %s" % (name, raw_data)) data = raw_data.json() print("%s, have_conf: %s" % (name, data)) assert data is False, "Daemon %s should not have a conf!" % name # This function will only send a SIGTERM to the arbiter daemon # self._stop_alignak_daemons(arbiter_only=True) time.sleep(2) # The arbiter daemon will then request its satellites to stop... # this is the same as the following code: print("Testing stop_request - tell the daemons we will stop soon...") for name, port in list(satellite_map.items()): if name in ['arbiter']: continue raw_data = req.get( "%s://localhost:%s/stop_request?stop_now=0" % (scheme, port)) data = raw_data.json() assert data is True time.sleep(1) print("Testing stop_request - tell the daemons they must stop now!") for name, port in list(satellite_map.items()): if name in ['arbiter']: continue raw_data = req.get( "%s://localhost:%s/stop_request?stop_now=1" % (scheme, port)) data = raw_data.json() assert data is True # This function will only send a SIGTERM to the arbiter daemon # and this will make all the daemons stop self._stop_alignak_daemons(arbiter_only=True) def test_get_objects_from_scheduler(self): """ Running all the Alignak daemons - get host and other objects from the scheduler :return: """ self._prepare_my_configuration() self._get_objects('http://localhost:7768') def 
test_get_objects_from_arbiter(self): """ Running all the Alignak daemons - get host and other objects from the arbiter :return: """ self._prepare_my_configuration() self._get_objects('http://localhost:7770') def _get_objects(self, endpoint): """ Running all the Alignak daemons - get host and other objects from the scheduler or from the arbiter :return: """ cfg_folder = '/tmp/alignak' cfg_dir = 'default_many_hosts' hosts_count = 10 daemons_list = [ 'broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master' ] satellite_map = { 'arbiter': '7770', 'scheduler': '7768', 'broker': '7772', 'poller': '7771', 'reactionner': '7769', 'receiver': '7773' } # Default shipped configuration preparation self._prepare_configuration(copy=True, cfg_folder=cfg_folder) # Specific daemon load configuration preparation if os.path.exists(os.path.join(self._test_dir, './cfg/%s/alignak.cfg' % cfg_dir)): shutil.copy(os.path.join(self._test_dir, './cfg/%s/alignak.cfg' % cfg_dir), '%s/etc' % cfg_folder) if os.path.exists('%s/etc/arbiter' % cfg_folder): shutil.rmtree('%s/etc/arbiter' % cfg_folder) shutil.copytree(os.path.join(self._test_dir, './cfg/%s/arbiter' % cfg_dir), '%s/etc/arbiter' % cfg_folder) self._prepare_hosts_configuration(cfg_folder='%s/etc/arbiter/objects/hosts' % cfg_folder, hosts_count=hosts_count, target_file_name='hosts.cfg') # Some script commands must be copied in the test folder if os.path.exists(os.path.join(self._test_dir, './libexec/check_command.sh')): shutil.copy(os.path.join(self._test_dir, './libexec/check_command.sh'), '%s/check_command.sh' % cfg_folder) # Update the default configuration files files = ['%s/etc/alignak.ini' % cfg_folder] try: cfg = configparser.ConfigParser() cfg.read(files) cfg.set('alignak-configuration', 'launch_missing_daemons', '1') # cfg.set('alignak-configuration', 'daemons_start_timeout', '15') # cfg.set('alignak-configuration', 'daemons_dispatch_timeout', '15') # # A macro for the check script directory 
cfg.set('alignak-configuration', '_EXEC_DIR', cfg_folder) for daemon in daemons_list: if cfg.has_section('daemon.%s' % daemon): cfg.set('daemon.%s' % daemon, 'alignak_launched', '1') with open('%s/etc/alignak.ini' % cfg_folder, "w") as modified: cfg.write(modified) except Exception as exp: print("* parsing error in config file: %s" % exp) assert False # Run daemons for the required duration self._run_alignak_daemons(cfg_folder='/tmp/alignak', daemons_list=daemons_list, run_folder='/tmp/alignak', runtime=5, # verbose=True ) # Here the daemons got started by the arbiter and the arbiter dispatched a configuration # We will ask to wait for a new configuration # ----- # 1/ get the running identifier (confirm the daemon is running) req = requests.Session() print("--- get_running_id") for name, port in list(satellite_map.items()): raw_data = req.get("http://localhost:%s/identity" % port, verify=False) assert raw_data.status_code == 200 data = raw_data.json() assert "running_id" in data print("%s, my running id: %s" % (name, data['running_id'])) # ----- # ----- # 2/ ask for a managed host. # The scheduler has a service to get some objects information. 
This may be used to know if # an host exist in Alignak and to get its configuration and state # Only for the scheduler and arbiter daemons # --- # Get an unknown type object # Query parameter raw_data = req.get("%s/object?o_type=unknown" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = unserialize(raw_data.json(), True) # => error message assert object == { '_message': 'Required unknown not found.', '_status': 'ERR'} # Get an unknown object # Query parameter raw_data = req.get( "%s/object?o_type=realm&o_name=unknown_realm" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = unserialize(raw_data.json(), True) assert object == { '_message': 'Required realm not found.', '_status': 'ERR'} # Get an unknown realm raw_data = req.get("%s/object/realm/unknown_realm" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = unserialize(raw_data.json(), True) assert object == { '_message': 'Required realm not found.', '_status': 'ERR'} # Get an unknown realm - case sensitivity! raw_data = req.get("%s/object/realm/all" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = unserialize(raw_data.json(), True) assert object == { '_message': 'Required realm not found.', '_status': 'ERR'} # Get a known realm raw_data = req.get("%s/object/realm/All" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 # # It should be this: # object = raw_data.json() # object = object['content'] # print("Got object: %s" % object['realm_name']) # assert object['realm_name'] == 'All' # # but the scheduler seem to not have received any realm ! # # todo: investigate this! 
# Get a known host raw_data = req.get("%s/object/host/localhost" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() print("Got object: %s" % object['content']['host_name']) assert object['content']['host_name'] == 'localhost' # Get a known host from its uuid raw_data = req.get("%s/object/host/%s" % (endpoint, object['content']['uuid'])) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() print("Got object: %s" % object['content']['host_name']) assert object['content']['host_name'] == 'localhost' # --- # Get all hostgroups raw_data = req.get("%s/object/hostgroup" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() groups = unserialize(object, True) assert groups.__class__ == Hostgroups for group in groups: print("Group: %s" % group.get_name()) assert group.__class__ == Hostgroup # --- # Get a hostgroup raw_data = req.get("%s/object/hostgroup/allhosts" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() print("Got hostgroup: %s / %s" % (type(object), object['content']['hostgroup_name'])) assert object['content']['hostgroup_name'] == 'allhosts' group = unserialize(object, True) assert group.__class__ == Hostgroup assert group.get_name() == 'allhosts' # --- # Get all hosts from the hostgroup for m in group.members: raw_data = req.get("%s/object/host/%s" % (endpoint, m)) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 member = raw_data.json() group_host = unserialize(member, True) assert group_host.__class__ == Host print("- group member: %s" % group_host.get_name()) # --- # Get all the services from the host for s in group_host.child_dependencies: print("Get host: %s/%s" % 
(group_host.get_name(), s)) raw_data = req.get("%s/object/service/%s" % (endpoint, s)) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 member = raw_data.json() host_service = unserialize(member, True) assert host_service.__class__ == Service print(" . service: %s" % host_service.get_full_name()) # --- # Get some host dump (raw mode will return a list of CSV text strings with a header line) raw_data = req.get("%s/dump?raw=1" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 res = raw_data.json() print("Got raw hosts dump %s: %s / %s" % (endpoint, type(res), res)) if endpoint == 'http://localhost:7770': # Arbiter groups data in a schedulers dict ... for sched in res: print("Scheduler: %s" % sched) sched = res[sched] # First list item is for hosts hosts_list = sched[0] print(hosts_list) print(hosts_list[0]) print(hosts_list[1]) assert hosts_list[0] == 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output' # Second list item is for services services_list = sched[1] print(services_list[0]) print(services_list[1]) # Only type;host;name assert services_list[0] in [ 'type;host;name', 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output'] else: assert len(res) == 2 # First list item is for hosts hosts_list = res[0] print(hosts_list[0]) print(hosts_list[1]) assert 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output' == hosts_list[ 0] # Second list item is for services services_list = res[1] print(services_list[0]) print(services_list[1]) assert 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output' == services_list[ 0] # With more details raw_data = req.get("%s/dump?raw=1&details=1" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 res = raw_data.json() print("Got raw detailed hosts dump (%s): 
%s / %s" % (endpoint, type(res), res)) if endpoint == 'http://localhost:7770': # Arbiter groups data in a schedulers dict ... for sched in res: print("Scheduler: %s" % sched) sched = res[sched] # First list item is for hosts hosts_list = sched[0] print(hosts_list[0]) print(hosts_list[1]) assert hosts_list[0].startswith( 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output;uuid') # Second list item is for services services_list = sched[1] print(services_list[0]) print(services_list[1]) assert services_list[0].startswith( 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output;uuid') else: assert len(res) == 2 # First list item is for hosts hosts_list = res[0] print(hosts_list[0]) print(hosts_list[1]) assert hosts_list[0].startswith( 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output;uuid') # Second list item is for services services_list = res[1] print(services_list[0]) print(services_list[1]) assert services_list[0].startswith( 'type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output;uuid') # Get some host dump, json mode raw_data = req.get("%s/dump" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 res = raw_data.json() print("Got hosts dump: %s / %s" % (type(res), res)) if endpoint == 'http://localhost:7770': # Arbiter groups data in a schedulers dict ... 
for sched in res: print("Scheduler: %s" % sched) hosts_list = res[sched] assert len(hosts_list) > 1 for host in hosts_list: assert 'name' in host assert 'last_check' in host assert 'state_id' in host assert 'state_type' in host assert 'state' in host assert 'output' in host assert 'is_problem' in host assert 'is_impact' in host assert 'services' in host else: hosts_list = res assert len(hosts_list) > 1 for host in hosts_list: assert 'name' in host assert 'last_check' in host assert 'state_id' in host assert 'state_type' in host assert 'state' in host assert 'output' in host assert 'is_problem' in host assert 'is_impact' in host assert 'services' in host # With more details raw_data = req.get("%s/dump?details=1" % endpoint) print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 res = raw_data.json() print("Got hosts dump: %s / %s" % (type(res), res)) if endpoint == 'http://localhost:7770': # Arbiter groups data in a schedulers dict ... for sched in res: print("Scheduler: %s" % sched) hosts_list = res[sched] assert len(hosts_list) > 1 for host in hosts_list: assert 'name' in host assert 'last_check' in host assert 'state_id' in host assert 'state_type' in host assert 'state' in host assert 'output' in host assert 'is_problem' in host assert 'is_impact' in host assert 'services' in host # More information than without details:) assert 'acknowledged' in host assert 'downtimed' in host assert 'next_check' in host assert 'long_output' in host assert 'perf_data' in host else: hosts_list = res assert len(hosts_list) > 1 for host in hosts_list: assert 'name' in host assert 'last_check' in host assert 'state_id' in host assert 'state_type' in host assert 'state' in host assert 'output' in host assert 'is_problem' in host assert 'is_impact' in host assert 'services' in host # More information than without details:) assert 'acknowledged' in host assert 'downtimed' in host assert 'next_check' in host assert 'long_output' in host 
                assert 'perf_data' in host

        # End of the previous test: ask the arbiter to stop all the daemons
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770')

    def test_get_external_commands(self):
        """ Running all the Alignak daemons - get external commands

        :return:
        """
        # Map each daemon name to the HTTP port its Web service listens on
        satellite_map = {
            'arbiter': '7770', 'scheduler': '7768', 'broker': '7772',
            'poller': '7771', 'reactionner': '7769', 'receiver': '7773'
        }
        daemons_list = ['broker-master', 'poller-master', 'reactionner-master',
                        'receiver-master', 'scheduler-master']

        self._prepare_my_configuration(daemons_list=daemons_list)
        self._run_alignak_daemons(cfg_folder=self.cfg_folder,
                                  daemons_list=daemons_list, runtime=5)

        req = requests.Session()

        # Here the daemons got started by the arbiter and the arbiter dispatched a configuration
        # We will ask to wait for a new configuration

        # -----
        # 1/ get the running identifier (confirm the daemon is running)
        print("--- get_running_id")
        for name, port in list(satellite_map.items()):
            raw_data = req.get("http://localhost:%s/identity" % port, verify=False)
            assert raw_data.status_code == 200
            print("Got (raw): %s" % raw_data)
            data = raw_data.json()
            assert "running_id" in data
            print("%s, my running id: %s" % (name, data['running_id']))
        # -----

        # -----
        # 2/ notify an external command to the arbiter (as the receiver does).
        raw_data = req.post("http://localhost:7770/_push_external_command",
                            data=json.dumps({'command': 'disable_notifications'}),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        print("push_external_commands, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        assert data['_status'] == 'OK'
        assert data['_message'] == 'Got command: DISABLE_NOTIFICATIONS'
        assert data['command'] == 'DISABLE_NOTIFICATIONS'

        raw_data = req.get("http://localhost:7770/_external_commands")
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("_external_commands, got: %s" % data)
        # The command may already have been consumed by a daemon, hence <= 1
        assert len(data) <= 1
        if data:
            assert 'creation_timestamp' in data[0]
            assert data[0]['cmd_line'] == 'DISABLE_NOTIFICATIONS'
            assert data[0]['my_type'] == 'externalcommand'
        else:
            print("Command consumed.")

        # -----
        # 3/ notify an external command to the arbiter (WS interface).
        # Using the GET HTTP method.
        # Note the %3B instead of the semi-colons!
        # For an host
        # raw_data = req.get("http://localhost:7770/command?command=process_host_check_result%3BHost_name%3B0%3BI am alive!", verify=False)
        raw_data = req.get("http://localhost:7770/command",
                           params={'command': 'process_host_check_result;Host_name;0;I am alive!'},
                           verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        assert data['_status'] == 'OK'
        # Note the uppercase for the command, not for the parameters...
        assert data['_message'] == 'Got command: PROCESS_HOST_CHECK_RESULT;Host_name;0;I am alive!'
        assert data['command'] == 'PROCESS_HOST_CHECK_RESULT;Host_name;0;I am alive!'

        raw_data = req.get("http://localhost:7770/_external_commands")
        assert raw_data.status_code == 200
        # note: `name` here is the loop variable leaked from the identity loop above
        print("%s _external_commands, got (raw): %s" % (name, raw_data))
        data = raw_data.json()
        print("---Got: %s" % data)
        assert len(data) <= 1
        if data:
            assert 'creation_timestamp' in data[0]
            assert data[0]['cmd_line'] == 'PROCESS_HOST_CHECK_RESULT;Host_name;0;I am alive!'
            assert data[0]['my_type'] == 'externalcommand'
        else:
            print("Command consumed.")

        # -----
        # 3/ notify an external command to the arbiter (WS interface).
        # Using the POST HTTP method (most recommended way).
        # For an host
        raw_data = req.post("http://localhost:7770/command",
                            data=json.dumps({
                                'command': 'process_host_check_result;Host_name;0;I am alive!'
                            }),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        assert data['_status'] == 'OK'
        # Note the uppercase for the command, not for the parameters...
        assert data['_message'] == 'Got command: PROCESS_HOST_CHECK_RESULT;Host_name;0;I am alive!'
        assert data['command'] == 'PROCESS_HOST_CHECK_RESULT;Host_name;0;I am alive!'

        raw_data = req.get("http://localhost:7770/_external_commands")
        assert raw_data.status_code == 200
        print("%s _external_commands, got (raw): %s" % (name, raw_data))
        data = raw_data.json()
        print("---Got: %s" % data)
        assert len(data) <= 1
        if data:
            assert 'creation_timestamp' in data[0]
            assert data[0]['cmd_line'] == 'PROCESS_HOST_CHECK_RESULT;Host_name;0;I am alive!'
            assert data[0]['my_type'] == 'externalcommand'
        else:
            print("Command consumed.")

        # -----
        # 3/ notify an external command to the arbiter (WS interface).
        # For an host
        raw_data = req.post("http://localhost:7770/command",
                            data=json.dumps({
                                'command': 'disable_passive_host_checks',
                                'element': 'host_name',
                                'parameters': 'p1;p2;p3'
                            }),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        assert data['_status'] == 'OK'
        assert data['_message'] == 'Got command: DISABLE_PASSIVE_HOST_CHECKS;host_name;p1;p2;p3'
        assert data['command'] == 'DISABLE_PASSIVE_HOST_CHECKS;host_name;p1;p2;p3'

        raw_data = req.get("http://localhost:7770/_external_commands")
        assert raw_data.status_code == 200
        print("%s _external_commands, got (raw): %s" % (name, raw_data))
        data = raw_data.json()
        print("---Got: %s" % data)
        assert len(data) <= 1
        if data:
            assert 'creation_timestamp' in data[0]
            assert data[0]['cmd_line'] == 'DISABLE_PASSIVE_HOST_CHECKS;host_name;p1;p2;p3'
            assert data[0]['my_type'] == 'externalcommand'
        else:
            print("Command consumed.")

        # An unknown command is accepted too: host form with 'host' key
        raw_data = req.post("http://localhost:7770/command",
                            data=json.dumps({
                                'command': 'test',
                                'host': 'host_name',
                                'parameters': 'p1;p2;p3'
                            }),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert data['_status'] == 'OK'
        assert data['_message'] == 'Got command: TEST;host_name;p1;p2;p3'
        assert data['command'] == 'TEST;host_name;p1;p2;p3'

        # For a service
        raw_data = req.post("http://localhost:7770/command",
                            data=json.dumps({
                                'command': 'test',
                                'element': 'host_name/service',
                                'parameters': 'p1;p2;p3'
                            }),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert data['_status'] == 'OK'
        assert data['_message'] == 'Got command: TEST;host_name;service;p1;p2;p3'
        assert data['command'] == 'TEST;host_name;service;p1;p2;p3'

        raw_data = req.post("http://localhost:7770/command",
                            data=json.dumps({
                                'command': 'test',
                                'host': 'host_name',
                                'service': 'service',
                                'parameters': 'p1;p2;p3'
                            }),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert data['_status'] == 'OK'
        assert data['_message'] == 'Got command: TEST;host_name;service;p1;p2;p3'
        assert data['command'] == 'TEST;host_name;service;p1;p2;p3'

        # For a user
        raw_data = req.post("http://localhost:7770/command",
                            data=json.dumps({
                                'command': 'test',
                                'element': 'user_name',
                                'parameters': 'p1;p2;p3'
                            }),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert data['_status'] == 'OK'
        assert data['_message'] == 'Got command: TEST;user_name;p1;p2;p3'
        assert data['command'] == 'TEST;user_name;p1;p2;p3'

        raw_data = req.post("http://localhost:7770/command",
                            data=json.dumps({
                                'command': 'test',
                                'user': 'user_name',
                                'parameters': 'p1;p2;p3'
                            }),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert data['_status'] == 'OK'
        assert data['_message'] == 'Got command: TEST;user_name;p1;p2;p3'
        assert data['command'] == 'TEST;user_name;p1;p2;p3'

        # Some time to let the daemons handle the commands
        time.sleep(5)

        # -----
        # Get external commands from all the daemons
        for name, port in list(satellite_map.items()):
            raw_data = req.get("http://localhost:%s/_external_commands" % port, verify=False)
            assert raw_data.status_code == 200
            data = raw_data.json()
            print("%s _external_commands, got: %s" % (name, data))
            # External commands got consumed by the daemons - not always all !
            # May be 0 but it seems that sometimes 5 are remaining
            assert len(data) in [0, 5]

        # This function will only send a SIGTERM to the arbiter daemon
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770')

    def _get_stats(self, req, satellite_map, details, run=False):
        """Get and check daemons statistics

        :param req: an opened requests Session used to query the daemons
        :param satellite_map: dict mapping daemon names to their HTTP port
        :param details: if True, request the detailed statistics (?details=1)
        :param run: if True, the daemons ran long enough for problems to exist
        :return: the scheduler reported problems (empty list when details is False)
        """
        problems = []

        print("--- stats")
        for name, port in list(satellite_map.items()):
            print("- for %s" % name)
            raw_data = req.get("http://localhost:%s/stats%s"
                               % (port, '?details=1' if details else ''), verify=False)
            print("%s, my stats: %s" % (name, raw_data.text))
            assert raw_data.status_code == 200
            data = raw_data.json()
            print("%s, my stats: %s" % (name, json.dumps(data)))

            # Common fields reported by every daemon
            assert 'alignak' in data
            assert 'type' in data
            assert 'name' in data
            assert 'version' in data
            # Same as start_time
            assert 'start_time' in data
            # +
            assert "program_start" in data
            # assert "load" in data
            # To be deprecated...
            assert "metrics" in data
            assert "modules" in data
            assert "counters" in data

            # Scheduler specific information
            if name in ['scheduler']:
                assert "livesynthesis" in data
                livesynthesis = data['livesynthesis']
                print("%s, my livesynthesis: %s" % (name, livesynthesis))
                # if not run:
                #     assert livesynthesis["hosts_total"] == 13
                #     assert livesynthesis["hosts_up_hard"] == 13
                #     assert livesynthesis["services_total"] == 100
                #     assert livesynthesis["services_ok_hard"] == 100
                # Detailed information!
                if details:
                    assert "commands" in data
                    commands = data['commands']
                    print("%s, my commands: %s" % (name, commands))
                    assert "problems" in data
                    problems = data['problems']
                    print("%s, my problems: %s" % (name, problems))
                    if run:
                        # Daemons ran for a while: some problems must have been raised
                        assert len(problems) > 0
                        for problem in problems:
                            problem = problems[problem]
                            print("A problem: %s" % (problem))
                            assert "host" in problem
                            assert "service" in problem
                            assert "output" in problem
                            assert "state" in problem
                            assert "state_type" in problem
                            assert "last_state" in problem
                            assert "last_state_type" in problem
                            assert "last_hard_state" in problem
                            assert "last_state_update" in problem
                            assert "last_state_change" in problem
                            assert "last_hard_state_change" in problem

            # Arbiter specific information
            if name in ['arbiter']:
                assert "livestate" in data
                livestate = data['livestate']
                assert "timestamp" in livestate
                assert "state" in livestate
                assert "output" in livestate
                assert "daemons" in livestate
                for daemon_state in livestate['daemons']:
                    # 0 means every daemon is seen as ok by the arbiter
                    assert livestate['daemons'][daemon_state] == 0
                print("%s, my livestate: %s" % (name, livestate))

                assert "daemons_states" in data
                daemons_state = data['daemons_states']
                for daemon_name in daemons_state:
                    daemon_state = daemons_state[daemon_name]
                    assert "type" in daemon_state
                    assert "name" in daemon_state
                    assert "realm_name" in daemon_state
                    assert "manage_sub_realms" in daemon_state
                    assert "uri" in daemon_state
                    assert "alive" in daemon_state
                    assert "passive" in daemon_state
                    assert "reachable" in daemon_state
                    assert "active" in daemon_state
                    assert "spare" in daemon_state
                    assert "polling_interval" in daemon_state
                    assert "configuration_sent" in daemon_state
                    assert "max_check_attempts" in daemon_state
                    assert "last_check" in daemon_state
                    assert "livestate" in daemon_state
                    assert "livestate_output" in daemon_state
                print("%s, my daemons state: %s" % (name, daemons_state))

                # Detailed information!
                if details:
                    assert "monitoring_objects" in data
                    monitoring_objects = data['monitoring_objects']
                    assert "hosts" in monitoring_objects
                    assert "items" in monitoring_objects["hosts"]
                    assert "count" in monitoring_objects["hosts"]
                    assert monitoring_objects["hosts"]["count"] == 13
                    assert "services" in monitoring_objects
                    assert "items" in monitoring_objects["services"]
                    assert "count" in monitoring_objects["services"]
                    assert monitoring_objects["services"]["count"] == 100
                    for o_type in monitoring_objects:
                        print("%s, my %s: %d items"
                              % (name, o_type, monitoring_objects[o_type]["count"]))
                        # The declared count must match the items really dumped
                        assert monitoring_objects[o_type]["count"] == len(monitoring_objects[o_type]["items"])

        return problems

    def test_get_stats(self):
        """ Running all the Alignak daemons - get daemons statistics

        :return:
        """
        satellite_map = {
            'arbiter': '7770', 'scheduler': '7768', 'broker': '7772',
            'poller': '7771', 'reactionner': '7769', 'receiver': '7773'
        }
        daemons_list = ['broker-master', 'poller-master', 'reactionner-master',
                        'receiver-master', 'scheduler-master']

        self._prepare_my_configuration(daemons_list=daemons_list)
        self._run_alignak_daemons(cfg_folder=self.cfg_folder, arbiter_only=True,
                                  daemons_list=daemons_list, runtime=5,
                                  update_configuration=False)

        req = requests.Session()

        # Here the daemons got started by the arbiter and the arbiter dispatched a configuration

        # -----
        # 1/ get the running identifier (confirm the daemon is running)
        print("--- get_running_id")
        for name, port in list(satellite_map.items()):
            raw_data = req.get("http://localhost:%s/identity" % port, verify=False)
            assert raw_data.status_code == 200
            print("Got (raw): %s" % raw_data)
            data = raw_data.json()
            assert "running_id" in data
            print("%s, my running id: %s" % (name, data['running_id']))
        # -----

        # -----
        # 1/ get Alignak overall problems
        print("--- get monitoring problems")
        raw_data = req.get("http://localhost:7770/monitoring_problems")
        print("Alignak problems: %s" % (raw_data.text))
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert 'alignak' in data
        assert 'type' in data
        assert 'name' in data
        assert 'version' in data
        # No problems exist for the scheduler master!
        assert 'problems' in data
        # assert 'scheduler-master' in data['problems']
        # assert '_freshness' in data['problems']['scheduler-master']
        # assert 'problems' in data['problems']['scheduler-master']
        # assert data['problems']['scheduler-master']['problems'] == {}

        # -----
        # 2/ get the daemons statistics - no details
        time.sleep(1)
        self._get_stats(req, satellite_map, False)
        time.sleep(1)

        # -----
        # 3/ once again, get the daemons statistics - with more details
        self._get_stats(req, satellite_map, True)

        # Sleep some seconds for some checks to execute
        time.sleep(120)

        # -----
        # 4/ once again, get the daemons statistics - with more details
        problems = self._get_stats(req, satellite_map, True, run=True)
        print("Problems: %s" % problems)
        time.sleep(1)

        # -----
        # 4/ get Alignak log
        print("--- get events log")
        raw_data = req.get("http://localhost:7770/events_log")
        print("Alignak events log: %s" % (raw_data.text))
        assert raw_data.status_code == 200
        data = raw_data.json()

        # -----
        # 5/ get Alignak overall problems
        print("--- get monitoring problems")
        raw_data = req.get("http://localhost:7770/monitoring_problems")
        print("Alignak problems: %s" % (raw_data.text))
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert 'alignak' in data
        assert 'type' in data
        assert 'name' in data
        assert 'version' in data
        # Now, some problems exist for the scheduler master!
        assert 'problems' in data
        assert '_freshness' in data
        assert 'scheduler-master' in data['problems']
        assert 'problems' in data['problems']['scheduler-master']
        # I have some problems
        assert len(data['problems']['scheduler-master']) > 0
        for problem in data['problems']['scheduler-master']['problems']:
            problem = data['problems']['scheduler-master']['problems'][problem]
            print("A problem: %s" % (problem))

        # 5bis/ get Alignak scheduler problems
        print("--- get monitoring problems")
        raw_data = req.get("http://localhost:7768/monitoring_problems")
        print("Alignak problems: %s" % (raw_data.text))
        assert raw_data.status_code == 200
        data2 = raw_data.json()
        assert 'alignak' in data2
        assert 'type' in data2
        assert 'name' in data2
        assert 'version' in data2
        assert 'start_time' in data2

        # Build a reStructuredText documentation page from the live responses
        doc = []
        doc.append(".. _alignak_features/monitoring_problems:")
        doc.append("")
        doc.append(".. Built from the test_daemons_api.py unit test last run!")
        doc.append("")
        doc.append("===========================")
        doc.append("Alignak monitoring problems")
        doc.append("===========================")
        doc.append("")
        doc.append("On a scheduler endpoint: ``/monitoring_problems``")
        doc.append("")
        doc.append("::")
        doc.append("")
        doc.append(" %s" % json.dumps(data2, sort_keys=True, indent=4))
        doc.append("")
        doc.append("On the arbiter endpoint: ``/monitoring_problems``")
        doc.append("")
        doc.append("::")
        doc.append("")
        doc.append(" %s" % json.dumps(data, sort_keys=True, indent=4))
        doc.append("")
        doc.append("")

        # Only write the page when a documentation folder is available
        rst_write = None
        rst_file = "alignak_monitoring_problems.rst"
        if os.path.exists("../doc/source/api"):
            rst_write = "../doc/source/api/%s" % rst_file
        if os.path.exists("../../alignak-doc/source/07_alignak_features/api"):
            rst_write = "../../alignak-doc/source/07_alignak_features/api/%s" % rst_file
        if rst_write:
            with open(rst_write, mode='wt') as out:
                # with open(rst_write, mode='wt', encoding='utf-8') as out:
                out.write('\n'.join(doc))
        # -----

        # -----
        # 6/ get Alignak overall live synthesis
        print("--- get livesynthesis")
        raw_data = req.get("http://localhost:7770/livesynthesis")
        print("Alignak livesynthesis: %s" % (raw_data.text))
        assert raw_data.status_code == 200
        data = raw_data.json()
        assert 'alignak' in data
        assert 'type' in data
        assert 'name' in data
        assert 'version' in data
        assert 'start_time' in data
        # Now, some problems exist for the scheduler master!
        assert 'livesynthesis' in data
        assert '_overall' in data['livesynthesis']
        assert 'scheduler-master' in data['livesynthesis']
        assert '_freshness' in data['livesynthesis']['scheduler-master']
        assert 'livesynthesis' in data['livesynthesis']['scheduler-master']
        livesynthesis = data['livesynthesis']['scheduler-master']['livesynthesis']
        print("LS: %s" % livesynthesis)

        # Build a reStructuredText documentation page from the live response
        doc = []
        doc.append(".. _alignak_features/livesynthesis:")
        doc.append("")
        doc.append(".. Built from the test_daemons_api.py unit test last run!")
        doc.append("")
        doc.append("=====================")
        doc.append("Alignak livesynthesis")
        doc.append("=====================")
        doc.append("")
        doc.append("")
        doc.append("On the arbiter endpoint: ``/livesynthesis``")
        doc.append("")
        doc.append("::")
        doc.append("")
        doc.append(" %s" % json.dumps(data, sort_keys=True, indent=4))
        doc.append("")
        doc.append("")

        rst_write = None
        rst_file = "alignak_livesynthesis.rst"
        if os.path.exists("../doc/source/api"):
            rst_write = "../doc/source/api/%s" % rst_file
        if os.path.exists("../../alignak-doc/source/07_alignak_features/api"):
            rst_write = "../../alignak-doc/source/07_alignak_features/api/%s" % rst_file
        if rst_write:
            with open(rst_write, mode='wt') as out:
                # with open(rst_write, mode='wt', encoding='utf-8') as out:
                out.write('\n'.join(doc))
        # -----

        # -----
        # 7/ get Alignak log
        print("--- get events log")
        raw_data = req.get("http://localhost:7770/events_log")
        print("Alignak events log: %s" % (raw_data.text))
        assert raw_data.status_code == 200
        data = raw_data.json()
        for log in data:
            print(log)

        # This function will request the arbiter daemon to stop
self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770') def test_get_realms(self): """ Running all the Alignak daemons - get realms organization :return: """ satellite_map = { 'arbiter': '7770', 'scheduler': '7768', 'broker': '7772', 'poller': '7771', 'reactionner': '7769', 'receiver': '7773' } daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master'] self._prepare_my_configuration(daemons_list=daemons_list, cfg_dir='default_multi_realms', realms=['All', 'Europe', 'Asia', 'France', 'Japan']) self._run_alignak_daemons(cfg_folder=self.cfg_folder, arbiter_only=True, daemons_list=daemons_list, runtime=5, update_configuration=False) req = requests.Session() # Here the daemons got started by the arbiter and the arbiter dispatched a configuration # ----- # 1/ get the running identifier (confirm the daemon is running) print("--- get_running_id") for name, port in list(satellite_map.items()): raw_data = req.get("http://localhost:%s/identity" % port, verify=False) assert raw_data.status_code == 200 print("Got (raw): %s" % raw_data) data = raw_data.json() assert "running_id" in data print("%s, my running id: %s" % (name, data['running_id'])) # ----- # ----- # 2/ get Alignak realms print("--- get realms") raw_data = req.get("http://localhost:7770/realms") print("Alignak realms: %s" % (raw_data.text)) assert raw_data.status_code == 200 data = raw_data.json() r = { "All": dict(name="All", level=0, hosts=[ "host-all-7", "host-all-6", "host-all-3", "host-all-2", "host-all-0", "host-all-1", "localhost", "host-all-8", "host-all-9", "host-all-5", "host-all-4" ], hostgroups=["monitoring_servers"], children={ "Asia": dict(name="Asia", level=1, hosts=["host-asia-3", "host-asia-7", "host-asia-9", "host-asia-4", "host-asia-8", "host-asia-1", "host-asia-2", "host-asia-0", "host-asia-5", "host-asia-6"], hostgroups=[], children={ "Japan": {"name": "Japan", "level": 2, "hosts": ["host-japan-8", "host-japan-7", "host-japan-3", 
"host-japan-4", "host-japan-0", "host-japan-5", "host-japan-1", "host-japan-2", "host-japan-9", "host-japan-6"], "hostgroups": [], "children": { "Osaka": {"name": "Osaka", "level": 3, "hosts": [], "hostgroups": [], "children": {}, "satellites": {"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}}, "Tokyo": {"name": "Tokyo", "level": 3, "hosts": ["h_Tokyo"], "hostgroups": [], "children": {}, "satellites": {"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}} }, "satellites": dict(schedulers=["scheduler-master"], reactionners=[ "reactionner-master"], brokers=["broker-master"], receivers=[ "receiver-master"], pollers=["poller-master"])} }, satellites={"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}), "Europe": dict(name="Europe", level=1, hosts=["host-europe-8", "host-europe-0", "host-europe-2", "host-europe-3", "host-europe-9", "host-europe-1", "host-europe-5", "host-europe-7", "host-europe-6", "host-europe-4"], hostgroups=[], children={ "France": {"name": "France", "level": 2, "hosts": ["host-france-4", "h_France", "host-france-0", "host-france-1", "host-france-8", "host-france-5", "host-france-7", "host-france-9", "host-france-6", "host-france-3", "host-france-2"], "hostgroups": [], "children": { "Lyon": {"name": "Lyon", "level": 3, "hosts": ["h_Lyon"], "hostgroups": [], "children": {}, "satellites": {"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}}, "Paris": {"name": "Paris", "level": 3, "hosts": ["h_Paris"], "hostgroups": [], "children": {}, "satellites": {"schedulers": ["scheduler-master"], 
"reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}}}, "satellites": {"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}}, "Italy": {"name": "Italy", "level": 2, "hosts": [], "hostgroups": [], "children": { "Roma": {"name": "Roma", "level": 3, "hosts": ["h_Roma"], "hostgroups": [], "children": {}, "satellites": { "schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}}, "Torino": {"name": "Torino", "level": 3, "hosts": [], "hostgroups": [], "children": {}, "satellites": { "schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}}}, "satellites": {"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}}}, satellites={"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}) }, satellites={"schedulers": ["scheduler-master"], "reactionners": ["reactionner-master"], "brokers": ["broker-master"], "receivers": ["receiver-master"], "pollers": ["poller-master"]}) } assert 'All' in data assert data['All']['name'] == 'All' assert data['All']['level'] == 0 assert len(data['All']["hosts"]) == 11 assert len(data['All']["hostgroups"]) == 4 assert 'satellites' in data['All'] assert 'children' in data['All'] assert 'Europe' in data['All']['children'] assert data['All']['children']['Europe']['level'] == 1 assert 'Asia' in data['All']['children'] assert data['All']['children']['Asia']['level'] == 1 # This function will request the arbiter 
# ... daemon to stop (tail of test_get_realms)
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770')

    def test_grafana_datasource(self):
        """ Arbiter interface- Grafana datasource

        Checks the arbiter HTTP endpoints used by a Grafana "Simple JSON" datasource:
        /search (list of queryable tables) and /query (table content).

        :return:
        """
        # Ports of the daemons' HTTP interfaces, keyed by daemon type
        satellite_map = {
            'arbiter': '7770', 'scheduler': '7768', 'broker': '7772',
            'poller': '7771', 'reactionner': '7769', 'receiver': '7773'
        }

        daemons_list = ['broker-master', 'poller-master', 'reactionner-master',
                        'receiver-master', 'scheduler-master']

        self._prepare_my_configuration(daemons_list=daemons_list)
        self._run_alignak_daemons(cfg_folder=self.cfg_folder, daemons_list=daemons_list,
                                  runtime=5)

        req = requests.Session()

        # Here the daemons got started by the arbiter and the arbiter dispatched a configuration
        # We will ask to wait for a new configuration

        # -----
        # 1/ get the running identifier (confirm the daemon is running)
        print("--- get_running_id")
        for name, port in list(satellite_map.items()):
            raw_data = req.get("http://localhost:%s/identity" % port, verify=False)
            assert raw_data.status_code == 200
            print("Got (raw): %s" % raw_data)
            data = raw_data.json()
            assert "running_id" in data
            print("%s, my running id: %s" % (name, data['running_id']))
        # -----

        # -----
        # 2/ get the available tables
        raw_data = req.options("http://localhost:7770/search")
        print("command, got (raw): %s" % (raw_data))
        assert raw_data.status_code == 200
        # assert raw_data.content == '{}'

        raw_data = req.get("http://localhost:7770/search", verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        # Only two tables are exposed to Grafana
        assert data == ["events_log", "problems_log"]
        # -----

        # 2/ get the events log
        raw_data = req.options("http://localhost:7770/query",)
        print("command, got (raw): %s" % (raw_data))
        assert raw_data.status_code == 200
        # assert raw_data.content == '{}'

        # Typical Grafana Simple JSON datasource query payload
        data = {
            'timezone': 'browser',
            'panelId': 38,
            'range': {
                'from': '2018-08-29T03:54:32.050Z',
                'to': '2018-08-29T04:54:32.050Z',
                'raw': {
                    'from': 'now-1h',
                    'to': 'now'
                }
            },
            'rangeRaw': {
                'from': 'now-1h',
                'to': 'now'
            },
            'interval': '15s',
            'intervalMs': 15000,
            'targets': [
                {'target': 'events_log', 'refId': 'A', 'type': 'table'}
            ],
            'format': 'json',
            'maxDataPoints': 242,
            'scopedVars': {
                '__interval': {'text': '15s', 'value': '15s'},
                '__interval_ms': {'text': 15000, 'value': 15000}
            }
        }
        raw_data = req.post("http://localhost:7770/query", data=json.dumps(data),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        # assert data == [
        #     {u'rows': [], u'type': u'table',
        #      u'columns': [{u'sort': True, u'text': u'Time', u'type': u'time', u'desc': True},
        #                   {u'text': u'Severity', u'type': u'integer'},
        #                   {u'text': u'Message', u'type': u'string'}]}
        # ]
        # Only the table structure is asserted - rows content may vary
        data = data[0]
        assert 'type' in data
        assert data['type'] == 'table'
        assert 'rows' in data
        assert 'columns' in data
        assert data['columns'] == [
            {u'sort': True, u'text': u'Time', u'type': u'time', u'desc': True},
            {u'text': u'Severity', u'type': u'integer'},
            {u'text': u'Message', u'type': u'string'}]

        # -----
        # 3/ get the problems log
        raw_data = req.options("http://localhost:7770/query",)
        print("command, got (raw): %s" % (raw_data))
        assert raw_data.status_code == 200
        # assert raw_data.content == '{}'

        data = {
            'timezone': 'browser',
            'panelId': 38,
            'range': {
                'from': '2018-08-29T03:54:32.050Z',
                'to': '2018-08-29T04:54:32.050Z',
                'raw': {
                    'from': 'now-1h',
                    'to': 'now'
                }
            },
            'rangeRaw': {
                'from': 'now-1h',
                'to': 'now'
            },
            'interval': '15s',
            'intervalMs': 15000,
            'targets': [
                {'target': 'problems_log', 'refId': 'A', 'type': 'table'}
            ],
            'format': 'json',
            'maxDataPoints': 242,
            'scopedVars': {
                '__interval': {'text': '15s', 'value': '15s'},
                '__interval_ms': {'text': 15000, 'value': 15000}
            }
        }
        raw_data = req.post("http://localhost:7770/query", data=json.dumps(data),
                            headers={'Content-Type': 'application/json'},
                            verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        # No problems raised yet, so rows is empty and the full structure can be asserted
        assert data == [
            {u'rows': [], u'type': u'table',
             u'columns': [{u'sort': True, u'text': u'Raised', u'type': u'time', u'desc': True},
                          {u'text': u'Severity', u'type': u'integer'},
                          {u'text': u'Host', u'type': u'string'},
                          {u'text': u'Service', u'type': u'string'},
                          {u'text': u'State', u'type': u'integer'},
                          {u'text': u'Output', u'type': u'string'}]}
        ]

    def test_host_passive_ws(self):
        """ Arbiter interface- Host passive check

        Posts host/service information to the arbiter /host endpoint and checks that
        the corresponding external passive check commands get raised.

        :return:
        """
        satellite_map = {
            'arbiter': '7770', 'scheduler': '7768', 'broker': '7772',
            'poller': '7771', 'reactionner': '7769', 'receiver': '7773'
        }

        daemons_list = ['broker-master', 'poller-master', 'reactionner-master',
                        'receiver-master', 'scheduler-master']

        self._prepare_my_configuration(daemons_list=daemons_list)
        self._run_alignak_daemons(cfg_folder=self.cfg_folder, daemons_list=daemons_list,
                                  runtime=5)

        req = requests.Session()

        # Here the daemons got started by the arbiter and the arbiter dispatched a configuration
        # We will ask to wait for a new configuration

        # -----
        # 1/ get the running identifier (confirm the daemon is running)
        print("--- get_running_id")
        for name, port in list(satellite_map.items()):
            raw_data = req.get("http://localhost:%s/identity" % port, verify=False)
            assert raw_data.status_code == 200
            print("Got (raw): %s" % raw_data)
            data = raw_data.json()
            assert "running_id" in data
            print("%s, my running id: %s" % (name, data['running_id']))
        # -----

        # -----
        # 2/ Upload an host (only) information
        now = int(time.time())
        # Only the host name, no live state data
        data = {
            'name': 'test_host'
        }
        raw_data = req.post("http://localhost:7770/host", data=json.dumps(data),
                            headers={'Content-Type': 'application/json'}, verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        # Status: OK
        # Host is alive :)
        # Created and raised an host passive check command
        # No issues
        assert data == {
            u'_status': u'OK',
            u'_result': [
                u'test_host is alive :)',
                u'Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;' % now
            ],
            u'_issues': []
        }

        # Now, with live state data
        now = int(time.time())
        data = {
            "name": "test_host",
            "livestate": {
                "state": "UP",
                "output": "Output...",
                "long_output": "Long output...",
                "perf_data": "'counter'=1",
            }
        }
        raw_data = req.post("http://localhost:7770/host", data=json.dumps(data),
                            headers={'Content-Type': 'application/json'}, verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        # Status: OK
        # Host is alive :)
        # Created and raised an host passive check command
        # No issues
        assert '_status' in data
        assert data['_status'] == u'OK'
        assert '_issues' in data
        assert data['_issues'] == []
        assert '_result' in data
        # The raised command carries the current timestamp; accept `now` or `now + 1`
        # in case the clock ticked between the request and the command creation
        if data['_result'] != [
            u'test_host is alive :)',
            u"Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;Output..."
            u"|'counter'=1\nLong output..." % now
        ]:
            if data['_result'] != [
                u'test_host is alive :)',
                u"Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;Output..."
                u"|'counter'=1\nLong output..." % (now + 1)
            ]:
                assert False
        # assert data == {
        #     u'_status': u'OK',
        #     u'_result': [
        #         u'test_host is alive :)',
        #         u"Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;Output..."
        #         u"|'counter'=1\nLong output..." % now
        #     ],
        #     u'_issues': []
        # }

        # -----
        # 3/ Upload an host and its services information
        now = int(time.time())
        # Only the host and its services names, no live state data
        data = {
            'name': 'test_host',
            "services": [
                {
                    "name": "test_ok_0"
                },
                {
                    "name": "test_ok_1"
                },
                {
                    "name": "test_ok_2"
                }
            ]
        }
        raw_data = req.post("http://localhost:7770/host", data=json.dumps(data),
                            headers={'Content-Type': 'application/json'}, verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        # Status: OK
        # Host is alive :)
        # Services are in OK state
        # Created and raised some host and services passive check command
        # No issues
        assert '_status' in data
        assert data['_status'] == u'OK'
        assert '_issues' in data
        assert data['_issues'] == []
        assert '_result' in data
        # Same `now` / `now + 1` tolerance as above
        if data['_result'] != [
            u'test_host is alive :)',
            u'Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;' % now,
            u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_0;0;' % now,
            u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_1;0;' % now,
            u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_2;0;' % now
        ]:
            if data['_result'] != [
                u'test_host is alive :)',
                u'Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;' % (now + 1),
                u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_0;0;' % (now + 1),
                u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_1;0;' % (now + 1),
                u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_2;0;' % (now + 1)
            ]:
                assert False
        # assert data == {
        #     u'_status': u'OK',
        #     u'_result': [
        #         u'test_host is alive :)',
        #         u'Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;' % now,
        #         u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_0;0;' % now,
        #         u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_1;0;' % now,
        #         u'Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_2;0;' % now
        #     ],
        #     u'_issues': []
        # }
        #
        # Now, with live state data
        now = int(time.time())
        data = {
            'name': 'test_host',
            "services": [
                {
                    "name": "test_ok_0",
                    "livestate": {
                        "state": "ok",
                        "output": "Output 0",
                        "long_output": "Long output 0",
                        "perf_data": "'counter'=0"
                    }
                },
                {
                    "name": "test_ok_1",
                    "livestate": {
                        "state": "warning",
                        "output": "Output 1",
                        "long_output": "Long output 1",
                        "perf_data": "'counter'=1"
                    }
                },
                {
                    "name": "test_ok_2",
                    "livestate": {
                        "state": "critical",
                        "output": "Output 2",
                        "long_output": "Long output 2",
                        "perf_data": "'counter'=2"
                    }
                },
            ]
        }
        raw_data = req.post("http://localhost:7770/host", data=json.dumps(data),
                            headers={'Content-Type': 'application/json'}, verify=False)
        print("command, got (raw): %s" % (raw_data.content))
        assert raw_data.status_code == 200
        data = raw_data.json()
        print("Got: %s" % data)
        # Status: OK
        # Host is alive :)
        # Created and raised some host and services passive check command
        # No issues
        assert '_status' in data
        assert data['_status'] == u'OK'
        assert '_issues' in data
        assert data['_issues'] == []
        assert '_result' in data
        # ok/warning/critical map to states 0/1/2 in the raised commands
        if data['_result'] != [
            u'test_host is alive :)',
            u"Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;" % now,
            u"Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_0;0;Output 0"
            u"|'counter'=0\nLong output 0" % now,
            u"Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_1;1;Output 1"
            u"|'counter'=1\nLong output 1" % now,
            u"Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_2;2;Output 2"
            u"|'counter'=2\nLong output 2" % now
        ]:
            if data['_result'] != [
                u'test_host is alive :)',
                u"Raised: [%s] PROCESS_HOST_CHECK_RESULT;test_host;0;" % (now + 1),
                u"Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_0;0;Output 0"
                u"|'counter'=0\nLong output 0" % (now + 1),
                u"Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_1;1;Output 1"
                u"|'counter'=1\nLong output 1" % (now + 1),
                u"Raised: [%s] PROCESS_SERVICE_CHECK_RESULT;test_host;test_ok_2;2;Output 2"
                u"|'counter'=2\nLong output 2" % (now + 1)
            ]:
                assert False
# NOTE(review): this is an auto-generated skeleton of TestDaemonsApi (stub bodies only),
# apparently a dataset-extraction artifact duplicating the real class - confirm before use.
class TestDaemonsApi(AlignakTest):
    '''Test the daemons HTTP API'''
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _prepare_my_configuration(self, daemons_list=None, remove_daemons=None,
                                  cfg_dir=None, realms=None):
        pass

    def test_daemons_api_no_ssl(self):
        ''' Running all the Alignak daemons - no SSL

        :return:
        '''
        pass

    @pytest.mark.skip("See #986 - SSL is broken with test files!")
    def test_daemons_api_ssl(self):
        ''' Running all the Alignak daemons - with SSL

        :return: None
        '''
        pass

    def _run_daemons_and_test_api(self, ssl=False):
        ''' Running all the Alignak daemons to check their correct launch and API responses

        This test concerns only the main API features ...

        :return:
        '''
        pass

    def test_daemons_configuration(self):
        ''' Running all the Alignak daemons to check their correct configuration

        Tests for the configuration dispatch API

        :return:
        '''
        pass

    def test_daemons_configuration_no_receiver(self):
        ''' Running all the Alignak daemons to check their correct configuration

        Do not include any receiver in the daemons list

        :return:
        '''
        pass

    def _run_daemons_and_configure(self, ssl=False, daemons_list=None):
        ''' Running all the Alignak daemons to check their correct launch and API

        Tests for the configuration dispatch API

        :return:
        '''
        pass

    def test_get_objects_from_scheduler(self):
        ''' Running all the Alignak daemons - get host and other objects
        from the scheduler

        :return:
        '''
        pass

    def test_get_objects_from_arbiter(self):
        ''' Running all the Alignak daemons - get host and other objects
        from the arbiter

        :return:
        '''
        pass

    def _get_objects(self, endpoint):
        ''' Running all the Alignak daemons - get host and other objects
        from the scheduler or from the arbiter

        :return:
        '''
        pass

    def test_get_external_commands(self):
        ''' Running all the Alignak daemons - get external commands

        :return:
        '''
        pass

    def _get_stats(self, req, satellite_map, details, run=False):
        '''Get and check daemons statistics'''
        pass

    def test_get_stats(self):
        ''' Running all the Alignak daemons - get daemons statistics

        :return:
        '''
        pass

    def test_get_realms(self):
        ''' Running all the Alignak daemons - get realms organization

        :return:
        '''
        pass

    def test_grafana_datasource(self):
        ''' Arbiter interface- Grafana datasource

        :return:
        '''
        pass

    def test_host_passive_ws(self):
        ''' Arbiter interface- Host passive check

        :return:
        '''
        pass
20
16
130
12
93
25
8
0.27
1
13
4
0
18
1
18
73
2,352
231
1,669
148
1,648
455
1,251
138
1,232
45
2
5
149
3,922
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests_integ/test_launch_daemons.py
tests_integ.test_launch_daemons.TestLaunchDaemons
class TestLaunchDaemons(AlignakTest):
    """Launch the Alignak daemons as real processes and check their behavior."""

    def setUp(self):
        super(TestLaunchDaemons, self).setUp()

        # Test configuration is copied to /tmp/alignak and patched so that the
        # arbiter launches all the missing daemons by itself
        self.cfg_folder = '/tmp/alignak'
        self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder)

        files = ['%s/etc/alignak.ini' % self.cfg_folder,
                 '%s/etc/alignak.d/daemons.ini' % self.cfg_folder,
                 '%s/etc/alignak.d/modules.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            cfg.set('alignak-configuration', 'launch_missing_daemons', '1')

            cfg.set('daemon.arbiter-master', 'alignak_launched', '1')
            cfg.set('daemon.scheduler-master', 'alignak_launched', '1')
            cfg.set('daemon.poller-master', 'alignak_launched', '1')
            cfg.set('daemon.reactionner-master', 'alignak_launched', '1')
            cfg.set('daemon.receiver-master', 'alignak_launched', '1')
            cfg.set('daemon.broker-master', 'alignak_launched', '1')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

    def tearDown(self):
        # Restore the default test logger configuration
        if 'ALIGNAK_LOGGER_CONFIGURATION' in os.environ:
            del os.environ['ALIGNAK_LOGGER_CONFIGURATION']

        print("Test terminated!")

    def test_arbiter_missing_parameters(self):
        """ Running the Alignak Arbiter with missing command line parameters

        :return:
        """
        print("Launching arbiter with missing parameters...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py")
        ]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    def test_arbiter_no_environment(self):
        """ Running the Alignak Arbiter without environment file

        :return:
        """
        print("Launching arbiter without environment file...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py")
        ]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    # @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_class_no_environment(self):
        """ Instantiate the Alignak Arbiter class without environment file

        :return:
        """
        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter without environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': '',
            'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master',
            'log_filename': '/tmp/arbiter.log',
            'legacy_cfg_files': [os.path.join(self._test_dir, '../etc/alignak.cfg')]
        }

        # Exception because the logger configuration file does not exist
        self.arbiter = Arbiter(**args)
        print("Arbiter: %s" % self.arbiter)
        assert self.arbiter.env_filename == ''
        assert self.arbiter.legacy_cfg_files == [os.path.abspath(
            os.path.join(self._test_dir, '../etc/alignak.cfg'))]

        # Configure the logger
        self.arbiter.log_level = 'ERROR'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()

    def test_arbiter_class_env_default(self):
        """ Instantiate the Alignak Arbiter class without legacy cfg files

        :return:
        """
        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter with default environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': "/tmp/alignak/etc/alignak.ini",
            'daemon_name': 'arbiter-master'
        }
        self.arbiter = Arbiter(**args)
        print("Arbiter: %s" % (self.arbiter))
        print("Arbiter: %s" % (self.arbiter.__dict__))
        assert self.arbiter.env_filename == '/tmp/alignak/etc/alignak.ini'
        assert self.arbiter.legacy_cfg_files == []
        assert len(self.arbiter.legacy_cfg_files) == 0

        # Configure the logger
        self.arbiter.log_level = 'INFO'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()

        # No legacy files found
        assert len(self.arbiter.legacy_cfg_files) == 0

    def test_arbiter_unexisting_environment(self):
        """ Running the Alignak Arbiter with a not existing environment file

        :return:
        """
        print("Launching arbiter with a not existing environment file...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", "/tmp/etc/unexisting.ini"
        ]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        assert b"Daemon 'arbiter-master' did not correctly read " \
               b"Alignak environment file: /tmp/etc/unexisting.ini" in stdout
        # Arbiter process must exit with a return code == 1
        assert ret == 99

    def test_arbiter_no_monitoring_configuration(self):
        """ Running the Alignak Arbiter with no monitoring configuration defined -
        no legacy cfg files

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", '%s/etc/alignak.ini' % self.cfg_folder
        ]
        ret = self._run_command_with_timeout(args, 30)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'total number of hosts in all realms: 0' in line:
                    ok = True
        # NOTE(review): errors is never incremented in this loop - the assert below
        # only checks the initial value
        assert errors == 0
        assert ok

    def test_arbiter_unexisting_monitoring_configuration(self):
        """ Running the Alignak Arbiter with a not existing monitoring configuration file

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%(etcdir)s/alignak-missing.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", '%s/etc/alignak.ini' % self.cfg_folder
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'WARNING:' in line and "cannot open main file '/tmp/alignak/etc/alignak-missing.cfg' for reading" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_bad_configuration(self):
        """ Running the Alignak Arbiter with bad monitoring configuration
        (unknown sub directory)

        :return:
        """
        print("Launching arbiter with a bad monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%(etcdir)s/alignak.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        # Update configuration with a bad file name
        files = ['%s/etc/alignak.cfg' % self.cfg_folder]
        replacements = {
            'cfg_dir=arbiter/templates': 'cfg_dir=unexisting/objects/realms'
        }
# (continuation of test_arbiter_bad_configuration)
        self._files_update(files, replacements)

        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", '%s/etc/alignak.ini' % self.cfg_folder
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'ERROR:' in line and "*** One or more problems were encountered while " \
                                        "processing the configuration (first check)..." in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_i_am_not_configured(self):
        """ Running the Alignak Arbiter with missing arbiter configuration

        :return:
        """
        print("Launching arbiter with a missing arbiter configuration...")

        # Current working directory for the default log file!
        if os.path.exists('%s/my-arbiter-name.log' % os.getcwd()):
            os.remove('%s/my-arbiter-name.log' % os.getcwd())

        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", '%s/etc/alignak.ini' % self.cfg_folder,
            "-n", "my-arbiter-name"
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        # Note the log filename!
        with open('%s/my-arbiter-name.log' % os.getcwd()) as f:
            for line in f:
                if "I cannot find my own configuration (my-arbiter-name)" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_verify(self):
        """ Running the Alignak Arbiter in verify mode only with the default shipped configuration

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        os.environ['ALIGNAK_LOGGER_CONFIGURATION'] = \
            os.path.abspath('./etc/warning_alignak-logger.json')
        print("Logger configuration file is: %s" % os.environ['ALIGNAK_LOGGER_CONFIGURATION'])

        print("Launching arbiter in verification mode...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", '%s/etc/alignak.ini' % self.cfg_folder,
            "-V"
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        specific_log = False
        info_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'INFO:' in line:
                    info_log = True
                    if 'Arbiter is in configuration check mode' in line:
                        specific_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # Arbiter process must exit with a return code == 0 and no errors
        # Arbiter changed the log level to INFO because of the verify mode
        assert specific_log is True
        assert info_log is True
        assert errors == 0
        assert ret == 0

    def test_arbiter_parameters_pid(self):
        """ Run the Alignak Arbiter with some parameters - set a pid file

        :return:
        """
        # All the default configuration files are in /tmp/etc

        print("Launching arbiter with forced PID file...")
        if os.path.exists('/tmp/arbiter.pid'):
            os.remove('/tmp/arbiter.pid')

        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", '%s/etc/alignak.ini' % self.cfg_folder,
            "-V",
            "--pid_file", "/tmp/arbiter.pid"
        ]
        ret = self._run_command_with_timeout(args, 20)

        # The arbiter unlinks the pid file - I cannot assert it exists!
        # assert os.path.exists('/tmp/arbiter.pid')

        errors = 0
        # ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                # if 'Unlinking /tmp/arbiter.pid' in line:
                #     ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0
        # assert ok

    def test_arbiter_parameters_log(self):
        """ Run the Alignak Arbiter with some parameters - log file name

        Log file name and log level may be specified on the command line

        :return:
        """
        # All the default configuration files are in /tmp/etc

        print("Launching arbiter with forced log file...")
        if os.path.exists('/tmp/arbiter.log'):
            os.remove('/tmp/arbiter.log')

        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-e", '%s/etc/alignak.ini' % self.cfg_folder,
            "-V", "-vv",
            "--log_level", "INFO",
            "--log_file", "/tmp/arbiter.log"
        ]
        ret = self._run_command_with_timeout(args, 20)

        # Log file created because of the -V option
        assert os.path.exists("/tmp/arbiter.log")

        errors = 0
        with open('/tmp/arbiter.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare_missing_configuration(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        # NOTE(review): dead code under @pytest.mark.skip - references an undefined
        # global `cfg_folder` (not self.cfg_folder); fix before re-activating
        print("Launching arbiter in spare mode...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-a", cfg_folder + "/alignak.cfg",
            "-c", cfg_folder + "/daemons/arbiterd.ini",
            "-n", "arbiter-spare"
        ]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        sleep(5)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %s" % ret)
        assert ret is not None, "Arbiter is still running!"
        # Arbiter process must exit with a return code == 1
        assert ret == 1

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        # NOTE(review): dead code under @pytest.mark.skip - same undefined
        # `cfg_folder` global as above
        print("Launching arbiter in spare mode...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-a", cfg_folder + "/alignak.cfg",
            "-c", cfg_folder + "/daemons/arbiterd.ini",
            "-n", "arbiter-spare"
        ]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        ret = arbiter.poll()
        # Arbiter must still be running ... it is still trying to dispatch the configuration!
        assert ret is None, "Arbiter exited!"

        sleep(5)

        # Arbiter never stops trying to send its configuration! We must kill it...

        print("Asking arbiter to end...")
        os.kill(arbiter.pid, signal.SIGTERM)

        ret = arbiter.poll()
        print("*** Arbiter exited on kill, no return code!")
        assert ret is None, "Arbiter is still running!"
        # No ERRORS because the daemons are not alive !
        ok = 0
        for line in iter(arbiter.stdout.readline, b''):
            print(">>> %s" % line.rstrip())
            if b'INFO:' in line:
                # I must find this line
                if b'[alignak.daemons.arbiterdaemon] I found myself in the configuration: arbiter-spare' in line:
                    ok += 1
                # and this one also
                if b'[alignak.daemons.arbiterdaemon] I am a spare Arbiter: arbiter-spare' in line:
                    ok += 1
                if b'I am not the master arbiter, I stop parsing the configuration' in line:
                    ok += 1
                if b'Waiting for master...' in line:
                    ok += 1
                if b'Waiting for master death' in line:
                    ok += 1
            assert b'CRITICAL:' not in line
        for line in iter(arbiter.stderr.readline, b''):
            print("*** %s" % line.rstrip())
            if sys.version_info > (2, 7):
                assert False, "stderr output!"
        assert ok == 5

    def test_arbiter_normal(self):
        """ Running the Alignak Arbiter - normal verbosity

        Expects log at the WARNING level - depends upon the logger configuration file

        :return:
        """
        self._arbiter(verbosity=None)

    def test_arbiter_verbose(self):
        """ Running the Alignak Arbiter - normal verbosity

        Expects log at the INFO level

        :return:
        """
        self._arbiter(verbosity='--verbose')

    def test_arbiter_verbose2(self):
        self._arbiter(verbosity='-v')

    def test_arbiter_very_verbose(self):
        """ Running the Alignak Arbiter - very verbose

        Expects log at the DEBUG level

        :return:
        """
        self._arbiter(verbosity='--debug')

    def test_arbiter_very_verbose2(self):
        self._arbiter(verbosity='-vv')

    def _arbiter(self, verbosity=None, log_file=None):
        """ Running the Alignak Arbiter with a specific verbosity

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        # to use the default shipped configuration
        os.environ['ALIGNAK_LOGGER_CONFIGURATION'] = \
            os.path.abspath('./etc/warning_alignak-logger.json')
        print("Logger configuration file is: %s" % os.environ['ALIGNAK_LOGGER_CONFIGURATION'])

        print("Launching arbiter ...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"),
            "-n", "arbiter-master",
            "-e", '%s/etc/alignak.ini' % self.cfg_folder
        ]
        if verbosity:
            args.append(verbosity)
        arbiter = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Wait for the arbiter to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770')

        errors = 0
        info_log = False
        debug_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'DEBUG:' in line:
                    debug_log = True
                if 'INFO:' in line:
                    info_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # arbiter process may exit with no errors!
        # assert errors == 0

        # Arbiter changed the log level to INFO because of the verify mode
        if verbosity in ['-v', '--verbose']:
            assert info_log is True

        # Arbiter changed the log level to DEBUG because of the verify mode
        if verbosity in ['-vv', '--debug']:
            assert debug_log is True

    def test_broker(self):
        """ Running the Alignak Broker

        :return:
        """
        print("Launching broker ...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_broker.py"),
            "-n", "broker-master",
            "-e", '%s/etc/alignak.ini' % self.cfg_folder
        ]
        broker = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('broker', broker.pid))

        # Wait for the broker to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7772')

        errors = 0
        with open('/tmp/alignak/log/broker-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # broker process must exit with no errors
        assert errors == 0

    def test_poller(self):
        """ Running the Alignak poller

        :return:
        """
        print("Launching poller ...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_poller.py"),
            "-n", "poller-master",
            "-e", '%s/etc/alignak.ini' % self.cfg_folder
        ]
        poller = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('poller', poller.pid))

        # Wait for the poller to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7771')

        errors = 0
        with open('/tmp/alignak/log/poller-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # poller process must exit with a return code == 0 and no errors
        assert errors == 0

    def test_reactionner(self):
        """ Running the Alignak reactionner

        :return:
        """
        print("Launching reactionner ...")
        args = [
            os.path.join(self._test_dir, "../alignak/bin/alignak_reactionner.py"),
            "-n",  # (args list continues beyond this chunk)
"reactionner-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder ] reactionner = subprocess.Popen(args) print("%s launched (pid=%d)" % ('reactionner', reactionner.pid)) # Wait for the reactionner to get started time.sleep(2) # This function will request the arbiter daemon to stop self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7769') errors = 0 with open('/tmp/alignak/log/reactionner-master.log') as f: for line in f: if 'ERROR:' in line or 'CRITICAL:' in line: print("*** %s" % line.rstrip()) errors = errors + 1 # reactionner process must exit with a return code == 0 and no errors assert errors == 0 def test_receiver(self): """ Running the Alignak receiver :return: """ print("Launching receiver ...") args = [ os.path.join(self._test_dir, "../alignak/bin/alignak_receiver.py"), "-n", "receiver-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder ] receiver = subprocess.Popen(args) print("%s launched (pid=%d)" % ('receiver', receiver.pid)) # Wait for the receiver to get started time.sleep(2) # This function will request the arbiter daemon to stop self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7773') errors = 0 with open('/tmp/alignak/log/receiver-master.log') as f: for line in f: if 'ERROR:' in line or 'CRITICAL:' in line: print("*** %s" % line.rstrip()) errors = errors + 1 # receiver process must exit with a return code == 0 and no errors assert errors == 0 def test_scheduler(self): """ Running the Alignak scheduler :return: """ print("Launching scheduler ...") args = [ os.path.join(self._test_dir, "../alignak/bin/alignak_scheduler.py"), "-n", "scheduler-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder ] scheduler = subprocess.Popen(args) print("%s launched (pid=%d)" % ('scheduler', scheduler.pid)) # Wait for the scheduler to get started time.sleep(2) # This function will request the arbiter daemon to stop self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7768') errors = 0 with 
open('/tmp/alignak/log/scheduler-master.log') as f: for line in f: if 'ERROR:' in line or 'CRITICAL:' in line: print("*** %s" % line.rstrip()) errors = errors + 1 # scheduler process must exit with a return code == 0 and no errors assert errors == 0
class TestLaunchDaemons(AlignakTest): def setUp(self): pass def tearDown(self): pass def test_arbiter_missing_parameters(self): ''' Running the Alignak Arbiter with missing command line parameters :return: ''' pass def test_arbiter_no_environment(self): ''' Running the Alignak Arbiter without environment file :return: ''' pass def test_arbiter_class_no_environment(self): ''' Instantiate the Alignak Arbiter class without environment file :return: ''' pass def test_arbiter_class_env_default(self): ''' Instantiate the Alignak Arbiter class without legacy cfg files :return: ''' pass def test_arbiter_unexisting_environment(self): ''' Running the Alignak Arbiter with a not existing environment file :return: ''' pass def test_arbiter_no_monitoring_configuration(self): ''' Running the Alignak Arbiter with no monitoring configuration defined - no legacy cfg files :return: ''' pass def test_arbiter_unexisting_monitoring_configuration(self): ''' Running the Alignak Arbiter with a not existing monitoring configuration file :return: ''' pass def test_arbiter_bad_configuration(self): ''' Running the Alignak Arbiter with bad monitoring configuration (unknown sub directory) :return: ''' pass def test_arbiter_i_am_not_configured(self): ''' Running the Alignak Arbiter with missing arbiter configuration :return: ''' pass def test_arbiter_verify(self): ''' Running the Alignak Arbiter in verify mode only with the default shipped configuration :return: ''' pass def test_arbiter_parameters_pid(self): ''' Run the Alignak Arbiter with some parameters - set a pid file :return: ''' pass def test_arbiter_parameters_log(self): ''' Run the Alignak Arbiter with some parameters - log file name Log file name and log level may be specified on the command line :return: ''' pass @pytest.mark.skip("To be re-activated with spare mode") def test_arbiter_spare_missing_configuration(self): ''' Run the Alignak Arbiter in spare mode - missing spare configuration :return: ''' pass @pytest.mark.skip("To be 
re-activated with spare mode") def test_arbiter_spare_missing_configuration(self): ''' Run the Alignak Arbiter in spare mode - missing spare configuration :return: ''' pass def test_arbiter_normal(self): ''' Running the Alignak Arbiter - normal verbosity Expects log at the WARNING level - depends upon the logger configuration file :return: ''' pass def test_arbiter_verbose(self): ''' Running the Alignak Arbiter - normal verbosity Expects log at the INFO level :return: ''' pass def test_arbiter_verbose2(self): pass def test_arbiter_very_verbose(self): ''' Running the Alignak Arbiter - very verbose Expects log at the DEBUG level :return: ''' pass def test_arbiter_very_verbose2(self): pass def _arbiter(self, verbosity=None, log_file=None): ''' Running the Alignak Arbiter with a specific verbosity :return: ''' pass def test_broker(self): ''' Running the Alignak Broker :return: ''' pass def test_poller(self): ''' Running the Alignak poller :return: ''' pass def test_reactionner(self): ''' Running the Alignak reactionner :return: ''' pass def test_receiver(self): ''' Running the Alignak receiver :return: ''' pass def test_scheduler(self): ''' Running the Alignak scheduler :return: ''' pass
30
23
27
4
18
6
3
0.32
1
5
1
0
27
2
27
82
759
122
483
151
451
154
397
126
367
10
2
4
81
3,923
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/tests_integ/test_launch_daemons_modules.py
tests_integ.test_launch_daemons_modules.TestLaunchDaemonsModules
class TestLaunchDaemonsModules(AlignakTest): def setUp(self): super(TestLaunchDaemonsModules, self).setUp() if os.path.exists("/tmp/alignak/retention/"): print("Cleaning retention files...") for item in os.listdir("/tmp/alignak/retention/"): if item.endswith(".json"): os.remove(os.path.join("/tmp/alignak/retention/", item)) print("Cleaning pid and log files...") for daemon in ['arbiter-master', 'scheduler-master', 'broker-master', 'poller-master', 'reactionner-master', 'receiver-master']: if os.path.exists('/tmp/var/run/%s.pid' % daemon): os.remove('/tmp/var/run/%s.pid' % daemon) if os.path.exists('/tmp/var/log/%s.log' % daemon): os.remove('/tmp/var/log/%s.log' % daemon) def tearDown(self): print("Test terminated!") def test_daemons_modules(self): """Running the Alignak daemons with a simple configuration using the Example daemon configured on all the default daemons :return: None """ daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master'] # Copy and update the default configuration cfg_folder = '/tmp/alignak' self._prepare_configuration(copy=True, cfg_folder=cfg_folder) files = ['%s/etc/alignak.ini' % cfg_folder, '%s/etc/alignak.d/daemons.ini' % cfg_folder, '%s/etc/alignak.d/modules.ini' % cfg_folder] try: cfg = configparser.ConfigParser() cfg.read(files) # Arbiter launches the other daemons cfg.set('daemon.arbiter-master', 'alignak_launched', '1') cfg.set('daemon.scheduler-master', 'alignak_launched', '1') cfg.set('daemon.poller-master', 'alignak_launched', '1') cfg.set('daemon.reactionner-master', 'alignak_launched', '1') cfg.set('daemon.receiver-master', 'alignak_launched', '1') cfg.set('daemon.broker-master', 'alignak_launched', '1') # Modules configuration cfg.set('daemon.arbiter-master', 'modules', 'Example') cfg.set('daemon.scheduler-master', 'modules', 'Example') cfg.set('daemon.poller-master', 'modules', 'Example') cfg.set('daemon.reactionner-master', 'modules', 'Example') 
cfg.set('daemon.receiver-master', 'modules', 'Example') cfg.set('daemon.broker-master', 'modules', 'Example') cfg.add_section('module.example') cfg.set('module.example', 'name', 'Example') cfg.set('module.example', 'type', 'test,test-module') cfg.set('module.example', 'python_name', 'alignak_module_example') cfg.set('module.example', 'option_1', 'foo') cfg.set('module.example', 'option_2', 'bar') cfg.set('module.example', 'option_3', 'foobar') with open('%s/etc/alignak.ini' % cfg_folder, "w") as modified: cfg.write(modified) except Exception as exp: print("* parsing error in config file: %s" % exp) assert False self._run_alignak_daemons( cfg_folder=cfg_folder, arbiter_only=True, runtime=30) self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770') # Check daemons log files ignored_warnings = [ "hosts configuration warnings:", "Configuration warnings:", # u"the parameter $DIST_BIN$ is ambiguous! No value after =, assuming an empty string", "[host::module_host_1] notifications are enabled but no contacts " "nor contact_groups property is defined for this host", "Did not get any ", # Modules related warnings "The module Example is not a worker one, I remove it from the worker list.", # todo: this log does not look appropriate... investigate more on this! "is still living 10 seconds after a normal kill, I help it to die", "inner retention module is loaded but is not enabled.", "Retention directory created" ] ignored_errors = [ # Sometimes, the retention file is not correctly read .... # this only during the tests on Travis CI 'Expecting value: line 1 column 1 (char 0)', 'Trying to add actions from an unknown scheduler' ] (errors_raised, warnings_raised) = \ self._check_daemons_log_for_errors(daemons_list, ignored_warnings=ignored_warnings, ignored_errors=ignored_errors) # self.kill_daemons() assert errors_raised == 0, "Error logs raised!" print("No unexpected error logs raised by the daemons") assert warnings_raised == 0, "Warning logs raised!" 
print("No unexpected warning logs raised by the daemons") # @pytest.mark.skipif(sys.version_info[:2] < (2, 7), reason="Not available for Python < 2.7") @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_logs(self): """Running the Alignak daemons with the monitoring logs module :return: None """ if os.path.exists('/tmp/alignak-events.log'): os.remove('/tmp/alignak-events.log') daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master'] self._run_alignak_daemons(cfg_folder='cfg/run_daemons_logs', daemons_list=daemons_list, run_folder='/tmp', runtime=30, arbiter_only=True) self._stop_alignak_daemons() # Check daemons log files ignored_warnings = [ 'Alignak Backend is not configured. Some module features will not be available.', 'defined logger configuration file ' # 'Error on backend login: ', # 'Alignak backend is currently not available', # 'Exception: BackendException raised with code 1000', # 'Response: ' ] ignored_errors = [ # 'Error on backend login: ', # 'Configured user account is not allowed for this module' 'Trying to add actions from an unknown scheduler' ] (errors_raised, warnings_raised) = \ self._check_daemons_log_for_errors(daemons_list, ignored_warnings=ignored_warnings, ignored_errors=ignored_errors) # self.kill_daemons() assert errors_raised == 0, "Error logs raised!" print("No unexpected error logs raised by the daemons") assert warnings_raised == 0, "Warning logs raised!" print("No unexpected warning logs raised by the daemons") assert os.path.exists( '/tmp/alignak-events.log'), '/tmp/alignak-events.log does not exist!' 
count = 0 print("Monitoring logs:") with open('/tmp/alignak-events.log') as f: for line in f: print(("- : %s" % line)) count += 1 """ [1496076886] INFO: CURRENT HOST STATE: localhost;UP;HARD;0; [1496076886] INFO: TIMEPERIOD TRANSITION: 24x7;-1;1 [1496076886] INFO: TIMEPERIOD TRANSITION: workhours;-1;1 """ assert count >= 2 # @pytest.mark.skipif(sys.version_info[:2] < (2, 7), reason="Not available for Python < 2.7") @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_logs_restart_module(self): """Running the Alignak daemons with the monitoring logs module - stop and restart the module :return: None """ if os.path.exists('/tmp/alignak-events.log'): os.remove('/tmp/alignak-events.log') cfg_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cfg/run_daemons_logs') tmp_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'run/test_launch_daemons_modules_logs') # Currently it is the same as the default execution ... to be modified later. cfg_modules = { 'arbiter': '', 'scheduler': '', 'broker': 'logs', 'poller': '', 'reactionner': '', 'receiver': '' } nb_errors = self._run_alignak_daemons_modules( cfg_folder, tmp_folder, cfg_modules, 10) assert nb_errors == 0, "Error logs raised!" print("No error logs raised when daemons started and loaded the modules") assert os.path.exists( '/tmp/alignak-events.log'), '/tmp/alignak-events.log does not exist!' 
count = 0 print("Monitoring logs:") with open('/tmp/alignak-events.log') as f: for line in f: print(("- : %s" % line)) count += 1 """ [1496076886] INFO: CURRENT HOST STATE: localhost;UP;HARD;0; [1496076886] INFO: TIMEPERIOD TRANSITION: 24x7;-1;1 [1496076886] INFO: TIMEPERIOD TRANSITION: workhours;-1;1 """ assert count >= 2 # Kill the logs module module_pid = None for proc in psutil.process_iter(): if "module: logs" in proc.name(): print(("Found logs module in the ps: %s (pid=%d)" % (proc.name(), proc.pid))) module_pid = proc.pid assert module_pid is not None print(("Asking pid=%d to end..." % (module_pid))) daemon_process = psutil.Process(module_pid) daemon_process.terminate() try: daemon_process.wait(10) except psutil.TimeoutExpired: assert False, "Timeout!" except psutil.NoSuchProcess: print("not existing!") pass # Wait for the module to restart time.sleep(5) # self._kill_alignak_daemons() # Search for some specific logs in the broker daemon logs expected_logs = { 'broker': [ "[alignak.modulesmanager] Importing Python module 'alignak_module_logs' for logs...", "[alignak.modulesmanager] Module properties: {'daemons': ['broker'], 'phases': ['running'], 'type': 'logs', 'external': True}", "[alignak.modulesmanager] Imported 'alignak_module_logs' for logs", "[alignak.modulesmanager] Loaded Python module 'alignak_module_logs' (logs)", # "[alignak.module] Give an instance of alignak_module_logs for alias: logs", "[alignak.module.logs] logger default configuration:", "[alignak.module.logs] - rotating logs in /tmp/alignak-events.log", "[alignak.module.logs] - log level: 20", "[alignak.module.logs] - rotation every 1 midnight, keeping 365 files", "[alignak.basemodule] Process for module logs received a signal: 15", "[alignak.module.logs] stopping...", "[alignak.module.logs] stopped", "[alignak.modulesmanager] The external module logs died unexpectedly!", "[alignak.modulesmanager] Setting the module logs to restart", "[alignak.basemodule] Starting external process for 
module logs..." ] } errors_raised = 0 travis_run = 'TRAVIS' in os.environ for name in ['broker']: assert os.path.exists('/tmp/%sd.log' % name), '/tmp/%sd.log does not exist!' % name print(("-----\n%s log file\n" % name)) with open('/tmp/%sd.log' % name) as f: lines = f.readlines() logs = [] for line in lines: # Catches WARNING and ERROR logs if 'WARNING' in line: line = line.split('WARNING: ') line = line[1] line = line.strip() print(("--- %s" % line[:-1])) if 'ERROR' in line: if "The external module logs died unexpectedly!" not in line: errors_raised += 1 line = line.split('ERROR: ') line = line[1] line = line.strip() print(("*** %s" % line[:-1])) # Catches INFO logs if 'INFO' in line: line = line.split('INFO: ') line = line[1] line = line.strip() if not travis_run: print((" %s" % line)) logs.append(line) if not travis_run: print(logs) for log in expected_logs[name]: print(("Last checked log %s: %s" % (name, log))) assert log in logs, logs # Still only two logs assert os.path.exists( '/tmp/alignak-events.log'), '/tmp/alignak-events.log does not exist!' count = 0 print("Monitoring logs:") with open('/tmp/alignak-events.log') as f: for line in f: print(("- : %s" % line)) count += 1 """ [1496076886] INFO: CURRENT HOST STATE: localhost;UP;HARD;0; [1496076886] INFO: TIMEPERIOD TRANSITION: 24x7;-1;1 [1496076886] INFO: TIMEPERIOD TRANSITION: workhours;-1;1 """ assert count >= 2 # @pytest.mark.skipif(sys.version_info[:2] < (2, 7), reason="Not available for Python < 2.7") @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_ws(self): """Running the Alignak daemons with the Web services module :return: None """ cfg_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cfg/run_daemons_ws') tmp_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'run/test_launch_daemons_modules_ws') # Currently it is the same as the default execution ... to be modified later. 
cfg_modules = { 'arbiter': '', 'scheduler': '', 'broker': '', 'poller': '', 'reactionner': '', 'receiver': 'web-services' } nb_errors = self._run_alignk_daemons_modules( cfg_folder, tmp_folder, cfg_modules, 10) assert nb_errors == 0, "Error logs raised!" print("No error logs raised when daemons started and loaded the modules") # Search the WS module module_pid = None for proc in psutil.process_iter(): if "module: web-services" in proc.name(): print(("Found WS module in the ps: %s (pid=%d)" % (proc.name(), proc.pid))) module_pid = proc.pid assert module_pid is not None self._stop_alignak_daemons() # Search for some specific logs in the broker daemon logs expected_logs = { 'receiver': [ "[alignak.modulesmanager] Importing Python module 'alignak_module_ws' for web-services...", "[alignak.modulesmanager] Module properties: {'daemons': ['receiver'], 'phases': ['running'], 'type': 'web-services', 'external': True}", "[alignak.modulesmanager] Imported 'alignak_module_ws' for web-services", "[alignak.modulesmanager] Loaded Python module 'alignak_module_ws' (web-services)", # "[alignak.module] Give an instance of alignak_module_ws for alias: web-services", # "[alignak.module.web-services] Alignak host creation allowed: False", # "[alignak.module.web-services] Alignak service creation allowed: False", # "[alignak.module.web-services] Alignak external commands, set timestamp: True", # "[alignak.module.web-services] Alignak Backend is not configured. Some module features will not be available.", # "[alignak.module.web-services] Alignak Arbiter configuration: 127.0.0.1:7770", # "[alignak.module.web-services] Alignak Arbiter polling period: 5", # "[alignak.module.web-services] Alignak daemons get status period: 10", # "[alignak.module.web-services] SSL is not enabled, this is not recommended. 
You should consider enabling SSL!", "[alignak.daemon] I correctly loaded my modules: [web-services]", # On arbiter stop: # "[alignak.module.web-services] Alignak arbiter is currently not available.", "[alignak.modulesmanager] Request external process to stop for web-services", "[alignak.basemodule] I'm stopping module u'web-services' (pid=%d)" % module_pid, "[alignak.modulesmanager] External process stopped.", "[alignak.daemon] Stopped receiver-master." ] } errors_raised = 0 for name in ['receiver']: assert os.path.exists('/tmp/%sd.log' % name), '/tmp/%sd.log does not exist!' % name print(("-----\n%s log file\n" % name)) with open('/tmp/%sd.log' % name) as f: lines = f.readlines() logs = [] for line in lines: # Catches WARNING and ERROR logs if 'WARNING' in line: line = line.split('WARNING: ') line = line[1] line = line.strip() print(("--- %s" % line[:-1])) if 'ERROR' in line: print(("*** %s" % line[:-1])) if "The external module logs died unexpectedly!" not in line: errors_raised += 1 line = line.split('ERROR: ') line = line[1] line = line.strip() # Catches INFO logs if 'INFO' in line: line = line.split('INFO: ') line = line[1] line = line.strip() print((" %s" % line)) logs.append(line) for log in logs: print(("...%s" % log)) for log in expected_logs[name]: print(("Last checked log %s: %s" % (name, log))) assert log in logs, logs # @pytest.mark.skipif(sys.version_info[:2] < (2, 7), reason="Not available for Python < 2.7") @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_ws_logs(self): """Running the Alignak daemons with the Web services and Logs modules :return: None """ if os.path.exists('/tmp/alignak-events.log'): os.remove('/tmp/alignak-events.log') cfg_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cfg/run_daemons_ws_logs') tmp_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'run/test_launch_daemons_modules_ws_logs') # Currently it is the same as the default execution ... 
to be modified later. cfg_modules = { 'arbiter': '', 'scheduler': '', 'broker': 'logs', 'poller': '', 'reactionner': '', 'receiver': 'web-services' } nb_errors = self._run_daemons_modules( cfg_folder, tmp_folder, cfg_modules, 10) assert nb_errors == 0, "Error logs raised!" print("No error logs raised when daemons started and loaded the modules") # Search the modules modules_pids = {} for proc in psutil.process_iter(): if "module: web-services" in proc.name(): print(("Found WS module in the ps: %s (pid=%d)" % (proc.name(), proc.pid))) modules_pids['ws'] = proc.pid if "module: logs" in proc.name(): print(("Found logs module in the ps: %s (pid=%d)" % (proc.name(), proc.pid))) modules_pids['logs'] = proc.pid assert len(modules_pids) == 2 assert os.path.exists( '/tmp/alignak-events.log'), '/tmp/alignak-events.log does not exist!' count = 0 print("Monitoring logs:") with open('/tmp/alignak-events.log') as f: for line in f: print(("- : %s" % line)) count += 1 """ [1496076886] INFO: CURRENT HOST STATE: localhost;UP;HARD;0; [1496076886] INFO: TIMEPERIOD TRANSITION: 24x7;-1;1 [1496076886] INFO: TIMEPERIOD TRANSITION: workhours;-1;1 """ assert count >= 2 self.kill_daemons() # Search for some specific logs in the broker daemon logs expected_logs = { 'receiver': [ "[alignak.modulesmanager] Importing Python module 'alignak_module_ws' for web-services...", "[alignak.modulesmanager] Module properties: {'daemons': ['receiver'], 'phases': ['running'], 'type': 'web-services', 'external': True}", "[alignak.modulesmanager] Imported 'alignak_module_ws' for web-services", "[alignak.modulesmanager] Loaded Python module 'alignak_module_ws' (web-services)", # "[alignak.module] Give an instance of alignak_module_ws for alias: web-services", # "[alignak.module.web-services] Alignak host creation allowed: False", # "[alignak.module.web-services] Alignak service creation allowed: False", # "[alignak.module.web-services] Alignak external commands, set timestamp: True", # 
"[alignak.module.web-services] Alignak Backend is not configured. Some module features will not be available.", # "[alignak.module.web-services] Alignak Arbiter configuration: 127.0.0.1:7770", # "[alignak.module.web-services] Alignak Arbiter polling period: 5", # "[alignak.module.web-services] Alignak daemons get status period: 10", # "[alignak.module.web-services] SSL is not enabled, this is not recommended. You should consider enabling SSL!", "[alignak.daemon] I correctly loaded my modules: [web-services]", # On arbiter stop: # "[alignak.module.web-services] Alignak arbiter is currently not available.", "[alignak.modulesmanager] Request external process to stop for web-services", "[alignak.basemodule] I'm stopping module u'web-services' (pid=%d)" % modules_pids['ws'], "[alignak.modulesmanager] External process stopped.", "[alignak.daemon] Stopped receiver-master." ], 'broker': [ "[alignak.modulesmanager] Importing Python module 'alignak_module_logs' for logs...", "[alignak.modulesmanager] Module properties: {'daemons': ['broker'], 'phases': ['running'], 'type': 'logs', 'external': True}", "[alignak.modulesmanager] Imported 'alignak_module_logs' for logs", "[alignak.modulesmanager] Loaded Python module 'alignak_module_logs' (logs)", # "[alignak.module] Give an instance of alignak_module_logs for alias: logs", "[alignak.module.logs] logger default configuration:", "[alignak.module.logs] - rotating logs in /tmp/alignak-events.log", "[alignak.module.logs] - log level: 10", "[alignak.module.logs] - rotation every 1 midnight, keeping 365 files", "[alignak.module.logs] Alignak Backend is not configured. 
Some module features will not be available.", "[alignak.daemon] I correctly loaded my modules: [logs]", # On arbiter stop: # "[alignak.module.web-services] Alignak arbiter is currently not available.", "[alignak.modulesmanager] Request external process to stop for logs", "[alignak.basemodule] I'm stopping module u'logs' (pid=%d)" % modules_pids['logs'], "[alignak.modulesmanager] External process stopped.", "[alignak.daemon] Stopped broker-master." ] } errors_raised = 0 for name in ['receiver', 'broker']: assert os.path.exists('/tmp/%sd.log' % name), '/tmp/%sd.log does not exist!' % name print(("-----\n%s log file\n" % name)) with open('/tmp/%sd.log' % name) as f: lines = f.readlines() logs = [] for line in lines: # Catches WARNING and ERROR logs if 'WARNING' in line: line = line.split('WARNING: ') line = line[1] line = line.strip() print(("--- %s" % line[:-1])) if 'ERROR' in line: print(("*** %s" % line[:-1])) errors_raised += 1 line = line.split('ERROR: ') line = line[1] line = line.strip() # Catches INFO logs if 'INFO' in line: line = line.split('INFO: ') line = line[1] line = line.strip() print((" %s" % line)) logs.append(line) for log in logs: print(("...%s" % log)) for log in expected_logs[name]: print(("Last checked log %s: %s" % (name, log))) assert log in logs, logs # @pytest.mark.skipif(sys.version_info[:2] < (2, 7), reason="Not available for Python < 2.7") @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_backend(self): """Running the Alignak daemons with the backend modules - backend is not running so all modules are in error :return: None """ cfg_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cfg/run_daemons_backend') tmp_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'run/test_launch_daemons_modules_backend') # Currently it is the same as the default execution ... to be modified later. 
cfg_modules = { 'arbiter': 'backend_arbiter', 'scheduler': 'backend_scheduler', 'broker': 'backend_broker', 'poller': '', 'reactionner': '', 'receiver': '' } nb_errors = self._run_alignk_daemons_modules( cfg_folder, tmp_folder, cfg_modules, 20) # Search the WS module # module_pid = None # for proc in psutil.process_iter(): # if "module: web-services" in proc.name(): # print("Found WS module in the ps: %s (pid=%d)" % (proc.name(), proc.pid)) # module_pid = proc.pid # assert module_pid is not None self._kill_alignak_daemons() assert nb_errors >= 3, "Error logs raised!" # 1 for the arbiter # 1 for the broker # 3 for the scheduler print("Expected error logs raised when daemons started and loaded the modules") # Search for some specific logs in the broker daemon logs expected_logs = { 'arbiter': [ "[alignak.modulesmanager] Importing Python module 'alignak_module_backend.arbiter' for backend_arbiter...", "[alignak.modulesmanager] Module properties: {'daemons': ['arbiter'], 'phases': ['configuration'], 'type': 'backend_arbiter', 'external': False}", "[alignak.modulesmanager] Imported 'alignak_module_backend.arbiter' for backend_arbiter", "[alignak.modulesmanager] Loaded Python module 'alignak_module_backend.arbiter' (backend_arbiter)", # "[alignak.module] Give an instance of alignak_module_backend.arbiter for alias: backend_arbiter", "[alignak.module.backend_arbiter] Number of processes used by backend client: 1", "[alignak.module.backend_arbiter] Alignak backend is not available for login. No backend connection, attempt: 1", "[alignak.module.backend_arbiter] Alignak backend is not available for login. No backend connection, attempt: 2", "[alignak.module.backend_arbiter] Alignak backend is not available for login. 
No backend connection, attempt: 3", "[alignak.module.backend_arbiter] bypass objects loading when Arbiter is in verify mode: False", "[alignak.module.backend_arbiter] configuration reload check period: 5 minutes", "[alignak.module.backend_arbiter] actions check period: 15 seconds", "[alignak.module.backend_arbiter] daemons state update period: 60 seconds", "[alignak.modulesmanager] Trying to initialize module: backend_arbiter", "[alignak.daemon] I correctly loaded my modules: [backend_arbiter]", "[alignak.daemons.arbiterdaemon] Getting Alignak global configuration from module 'backend_arbiter'", "[alignak.module.backend_arbiter] Alignak backend connection is not available. Skipping Alignak configuration load and provide an empty configuration to the Arbiter.", "[alignak.module.backend_arbiter] Alignak backend connection is not available. Skipping objects load and provide an empty list to the Arbiter." ], 'broker': [ "[alignak.modulesmanager] Importing Python module 'alignak_module_backend.broker' for backend_broker...", "[alignak.modulesmanager] Module properties: {'daemons': ['broker'], 'type': 'backend_broker', 'external': True}", "[alignak.modulesmanager] Imported 'alignak_module_backend.broker' for backend_broker", "[alignak.modulesmanager] Loaded Python module 'alignak_module_backend.broker' (backend_broker)", # "[alignak.module] Give an instance of alignak_module_backend.broker for alias: backend_broker", "[alignak.module.backend_broker] Number of processes used by backend client: 1", # "[alignak.module.backend_broker] Error on backend login: ", "[alignak.module.backend_broker] Configured user account is not allowed for this module", "[alignak.daemon] I correctly loaded my modules: [backend_broker]", "[alignak.modulesmanager] Trying to initialize module: backend_broker", ], 'scheduler': [ "[alignak.modulesmanager] Importing Python module 'alignak_module_backend.scheduler' for backend_scheduler...", "[alignak.modulesmanager] Module properties: {'daemons': 
['scheduler'], 'phases': ['running'], 'type': 'backend_scheduler', 'external': False}", "[alignak.modulesmanager] Imported 'alignak_module_backend.scheduler' for backend_scheduler", "[alignak.modulesmanager] Loaded Python module 'alignak_module_backend.scheduler' (backend_scheduler)", # "[alignak.module] Give an instance of alignak_module_backend.scheduler for alias: backend_scheduler", "[alignak.module.backend_scheduler] Number of processes used by backend client: 1", "[alignak.module.backend_scheduler] Alignak backend is not available for login. No backend connection, attempt: 1", "[alignak.modulesmanager] Trying to initialize module: backend_scheduler", "[alignak.daemon] I correctly loaded my modules: [backend_scheduler]", ] } errors_raised = 0 for name in ['arbiter', 'broker', 'scheduler']: assert os.path.exists('/tmp/%sd.log' % name), '/tmp/%sd.log does not exist!' % name print(("-----\n%s log file\n" % name)) with open('/tmp/%sd.log' % name) as f: lines = f.readlines() logs = [] for line in lines: # Catches WARNING and ERROR logs if 'WARNING:' in line: line = line.split('WARNING: ') line = line[1] line = line.strip() print(("--- %s" % line[:-1])) if 'ERROR:' in line: print(("*** %s" % line[:-1])) if "Error on backend login:" not in line \ and "Configured user account is not allowed for this module" not in line \ and "Alignak backend connection is not available. " not in line: errors_raised += 1 line = line.split('ERROR: ') line = line[1] line = line.strip() # Catches INFO logs if 'INFO:' in line: line = line.split('INFO: ') line = line[1] line = line.strip() print((" %s" % line)) logs.append(line) for log in logs: print(("...%s" % log)) for log in expected_logs[name]: print(("Last checked log %s: %s" % (name, log))) assert log in logs, logs assert errors_raised == 0
class TestLaunchDaemonsModules(AlignakTest): def setUp(self): pass def tearDown(self): pass def test_daemons_modules(self): '''Running the Alignak daemons with a simple configuration using the Example daemon configured on all the default daemons :return: None ''' pass @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_logs(self): '''Running the Alignak daemons with the monitoring logs module :return: None ''' pass @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_logs_restart_module(self): '''Running the Alignak daemons with the monitoring logs module - stop and restart the module :return: None ''' pass @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_ws(self): '''Running the Alignak daemons with the Web services module :return: None ''' pass @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_ws_logs(self): '''Running the Alignak daemons with the Web services and Logs modules :return: None ''' pass @pytest.mark.skip("No real interest for Alignak testings...") def test_daemons_modules_backend(self): '''Running the Alignak daemons with the backend modules - backend is not running so all modules are in error :return: None ''' pass
14
6
78
8
56
15
8
0.28
1
6
0
0
8
0
8
63
645
67
453
90
439
125
299
78
290
17
2
5
63
3,924
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.PropertyTests
class PropertyTests: """Common tests for all property classes""" def test_no_default_value(self): p = self.prop_class() assert p.default is NONE_OBJECT assert not p.has_default assert p.required def test_default_value(self): default_value = object() p = self.prop_class(default=default_value) assert p.default is default_value assert p.has_default assert not p.required def test_fill_brok(self): p = self.prop_class() assert 'full_status' not in p.fill_brok p = self.prop_class(default='0', fill_brok=['full_status']) assert 'full_status' in p.fill_brok def test_unused(self): p = self.prop_class() assert not p.unused
class PropertyTests: '''Common tests for all property classes''' def test_no_default_value(self): pass def test_default_value(self): pass def test_fill_brok(self): pass def test_unused(self): pass
5
1
5
0
5
0
1
0.05
0
1
0
7
4
0
4
4
25
4
20
10
15
1
20
10
15
1
0
0
4
3,925
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_config_class.py
tests.test_aa_config_class.TestConfigClassBase
class TestConfigClassBase(AlignakTest): """ This class tests the Config object initialization """ def setUp(self): super(TestConfigClassBase, self).setUp() def test_config_ok(self): """Test the object initialization and base features""" # --- # print("Reference to Config: %s" % sys.getrefcount(Config)) # mod = importlib.import_module("alignak.objects.config") # importlib.reload(mod) # # # # importlib.reload('alignak.objects.config') # print("Reference to Config: %s" % sys.getrefcount(Config)) # Fresh initialized configuration alignak_cfg = Config({}) assert alignak_cfg.magic_hash next_instance_id = "Config_%s" % Config._next_id # assert str(alignak_cfg) == '<Config Config_1 - unknown />' # Another fresh initialized configuration alignak_cfg = Config({}) assert alignak_cfg.magic_hash # Config instance_id incremented! assert next_instance_id == alignak_cfg.instance_id # assert str(alignak_cfg) == '<Config Config_2 - unknown />' pprint(alignak_cfg.macros) # ----------------------------------------------------------------------------------------- # Macro part # --- # Macro list is yet defined but the values are not yet set expected_macros = { # Main Config objects macros 'ALIGNAK': 'alignak_name', 'ALIGNAK_CONFIG': 'alignak_env', 'ADMINEMAIL': '', 'ADMINPAGER': '', 'MAINCONFIGDIR': 'config_base_dir', 'CONFIGFILES': 'config_files', 'MAINCONFIGFILE': 'main_config_file', 'OBJECTCACHEFILE': '', 'COMMENTDATAFILE': '', 'TEMPPATH': '', 'SERVICEPERFDATAFILE': '', 'RESOURCEFILE': '', 'COMMANDFILE': '', 'DOWNTIMEDATAFILE': '', 'HOSTPERFDATAFILE': '', 'LOGFILE': '', 'TEMPFILE': '', 'RETENTIONDATAFILE': '', 'STATUSDATAFILE': '', 'RETENTION_FILE': 'state_retention_file' } # The 64 "USER" macros. for i in range(1, 65): expected_macros['USER%d' % i] = '$USER%d$' % i assert alignak_cfg.macros == expected_macros # After several tests execution the Config object got imported several times and # has several python references. 
The properties object containing the macros is a # class object and has thus been updated because some configurations got loaded. # Because of this, a pure assertion is only valid when the test is the first one executed! compare_macros = {} for macro in list(alignak_cfg.macros.items()): compare_macros[macro[0]] = macro[1] # print(macro) # if macro[0] not in [ # 'DIST', 'DIST_BIN', 'DIST_ETC', 'DIST_LOG', 'DIST_RUN', 'DIST_VAR', # 'VAR', 'RUN', 'ETC', 'BIN', 'USER', 'GROUP', 'LIBEXEC', 'LOG', # 'NAGIOSPLUGINSDIR', 'PLUGINSDIR', '' # ]: # compare_macros[macro[0]] = macro[1] assert compare_macros == expected_macros assert alignak_cfg.macros == expected_macros # # Macro properties are not yet existing! # for macro in alignak_cfg.macros: # print("Macro: %s" % macro) # assert getattr(alignak_cfg, '$%s$' % macro, None) is None, \ # "Macro: %s property is still existing!" % ('$%s$' % macro) # ----------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------- # Configuration parsing part # --- # Read and parse the legacy configuration files, do not provide environment file name legacy_cfg_files = ['../etc/alignak.cfg'] raw_objects = alignak_cfg.read_config_buf( alignak_cfg.read_legacy_cfg_files(legacy_cfg_files) ) assert isinstance(raw_objects, dict) for daemon_type in ['arbiter', 'broker', 'poller', 'reactionner', 'receiver', 'scheduler']: assert daemon_type in raw_objects # Make sure we got all the managed objects type for o_type in alignak_cfg.types_creations: assert o_type in raw_objects, 'Did not found %s in configuration object' % o_type assert alignak_cfg.alignak_env == 'n/a' # Same parser that stores the environment files names env_filename = '../etc/alignak.ini' # It should be a list env_filename = [os.path.abspath(env_filename)] # Read and parse the legacy configuration files, do not provide environement file name raw_objects = 
alignak_cfg.read_config_buf( alignak_cfg.read_legacy_cfg_files(legacy_cfg_files, env_filename) ) assert alignak_cfg.alignak_env == env_filename # Same parser that stores a string (not list) environment file name # as an absolute file path in a list env_filename = '../etc/alignak.ini' # Read and parse the legacy configuration files, do not provide environement file name raw_objects = alignak_cfg.read_config_buf( alignak_cfg.read_legacy_cfg_files(legacy_cfg_files, env_filename) ) assert alignak_cfg.alignak_env == [os.path.abspath(env_filename)] # Same parser that stores the environment file name as an absolute file path env_filename = '../etc/alignak.ini' # Read and parse the legacy configuration files, do not provide environement file name raw_objects = alignak_cfg.read_config_buf( alignak_cfg.read_legacy_cfg_files(legacy_cfg_files, env_filename) ) assert alignak_cfg.alignak_env == [os.path.abspath(env_filename)] # ----------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------- # Macro part # --- # The macros defined in the default loaded configuration expected_macros.update({ # 'DIST': '$DIST$', # 'DIST_BIN': '$DIST_BIN$', # 'DIST_ETC': '$DIST_ETC$', # 'DIST_LOG': '$DIST_LOG$', # 'DIST_RUN': '$DIST_RUN$', # 'DIST_VAR': '$DIST_VAR$', # 'BIN': '$BIN$', # 'ETC': '$ETC$', # 'GROUP': '$GROUP$', # 'LIBEXEC': '$LIBEXEC$', # 'LOG': '$LOG$', # 'NAGIOSPLUGINSDIR': '', # 'PLUGINSDIR': '$', # 'RUN': '$RUN$', # 'USER': '$USER$', # 'USER1': '$NAGIOSPLUGINSDIR$', # 'VAR': '$VAR$' }) assert sorted(alignak_cfg.macros) == sorted(expected_macros) assert alignak_cfg.resource_macros_names == [] # Macro are not existing in the object attributes! for macro in alignak_cfg.macros: macro = alignak_cfg.macros[macro] assert getattr(alignak_cfg, '$%s$' % macro, None) is None, \ "Macro: %s property is existing as an attribute!" 
% ('$%s$' % macro) # But as an attribute of the properties attribute! for macro in alignak_cfg.macros: macro = alignak_cfg.macros[macro] assert getattr(alignak_cfg.properties, '$%s$' % macro, None) is None, \ "Macro: %s property is not existing as an attribute of properties!" % ('$%s$' % macro) def test_config_serialization(self): """Test the object initialization and base features""" # --- # print("Reference to Config: %s" % sys.getrefcount(Config)) # mod = importlib.import_module("alignak.objects.config") # importlib.reload(mod) # # # # importlib.reload('alignak.objects.config') # print("Reference to Config: %s" % sys.getrefcount(Config)) # Fresh initialized configuration alignak_cfg = Config({}) assert alignak_cfg.magic_hash # No objects still exist in the attributes for _, _, category, _, _ in list(alignak_cfg.types_creations.values()): assert getattr(alignak_cfg, category, None) is None # Read and parse the legacy configuration files, do not provide environment file name legacy_cfg_files = [ os.path.join(self._test_dir, '../etc/alignak.cfg') ] raw_objects = alignak_cfg.read_legacy_cfg_files(legacy_cfg_files) raw_objects = alignak_cfg.read_config_buf(raw_objects) # Create objects for arbiters and modules alignak_cfg.early_create_objects(raw_objects) # Only arbiters and modules objects exist in the attributes for _, _, category, _, _ in list(alignak_cfg.types_creations.values()): if category in ['arbiters', 'modules']: assert getattr(alignak_cfg, category, None) is not None else: assert getattr(alignak_cfg, category, None) is None # Create objects for all the configuration alignak_cfg.create_objects(raw_objects) # Now all objects exist in the attributes print("After parsing files:") for _, _, category, _, _ in list(alignak_cfg.types_creations.values()): assert getattr(alignak_cfg, category, None) is not None print("- %s %s" % (len(getattr(alignak_cfg, category)) if getattr(alignak_cfg, category) else 'no', category)) # Create Template links 
alignak_cfg.linkify_templates() # All inheritances alignak_cfg.apply_inheritance() # Explode between types alignak_cfg.explode() # Implicit inheritance for services alignak_cfg.apply_implicit_inheritance() # Fill default values for all the configuration objects alignak_cfg.fill_default_configuration() # Remove templates from config # Do not remove anymore! # alignak_cfg.remove_templates() # Overrides specific service instances properties alignak_cfg.override_properties() # Linkify objects to each other alignak_cfg.linkify() # applying dependencies alignak_cfg.apply_dependencies() # Raise warning about currently unmanaged parameters alignak_cfg.warn_about_unmanaged_parameters() # Explode global configuration parameters into Classes alignak_cfg.explode_global_conf() # set our own timezone and propagate it to other satellites alignak_cfg.propagate_timezone_option() # Look for business rules, and create the dep tree alignak_cfg.create_business_rules() # And link them alignak_cfg.create_business_rules_dependencies() # Now all objects exist in the attributes print("After objects creation:") for _, _, category, _, _ in list(alignak_cfg.types_creations.values()): assert getattr(alignak_cfg, category, None) is not None # Store and print the items length setattr(self, 'len_' + category, len(getattr(alignak_cfg, category))) print("- %s %s" % (len(getattr(alignak_cfg, category)) if getattr(alignak_cfg, category) else 'no', category)) for item in getattr(alignak_cfg, category): # Cleanable properties are still existing in the objects for prop in ['imported_from', 'use', 'plus', 'register', 'definition_order', 'configuration_warnings', 'configuration_errors']: assert hasattr(item, prop) assert alignak_cfg assert alignak_cfg.is_correct() assert alignak_cfg.conf_is_correct print("Errors: ", alignak_cfg.show_errors()) alignak_cfg.dump(dump_file_name='/tmp/dumped_configuration.json') dump = alignak_cfg.dump() # pprint(dump) # Configuration cleaning alignak_cfg.clean() # Now all 
objects exist in the attributes print("After objects cleaning:") for _, _, category, _, _ in list(alignak_cfg.types_creations.values()): assert getattr(alignak_cfg, category, None) is not None for item in getattr(alignak_cfg, category): # Cleanable properties are still existing in the objects for prop in ['imported_from', 'use', 'plus', 'definition_order', 'configuration_warnings', 'configuration_errors']: assert not hasattr(item, prop) # --- Contacts # Serialize to send to another daemon print("Contacts: %s" % alignak_cfg.contacts) for contact in alignak_cfg.contacts.templates: print("- %s" % (alignak_cfg.contacts.templates[contact])) for contact in alignak_cfg.contacts.items: print("- %s" % (alignak_cfg.contacts.items[contact])) res = serialize(alignak_cfg.contacts, no_json=True, printing=False) print("Serialized contacts: %s" % res) # pprint(res) # Un-serialize when received by a daemon result = unserialize(res, printing=False) print("Unserialized: %s" % result) assert len(result.templates) == 1 for uuid in result.templates: contact = result.templates[uuid] print("- %s" % contact) assert isinstance(contact, Contact) assert contact.__class__.my_type == "contact" assert contact.is_a_template() is True assert contact.get_name() in ['generic-contact'] assert len(result.items) == 2 for uuid in result.items: contact = result.items[uuid] print("- %s" % contact) assert isinstance(contact, Contact) assert contact.__class__.my_type == "contact" assert contact.is_a_template() is False assert contact.get_name() in ['guest', 'admin'] # --- Hosts # Serialize to send to another daemon print("Hosts: %s" % alignak_cfg.hosts) for host in alignak_cfg.hosts.templates: print("- %s" % (alignak_cfg.hosts.templates[host])) for host in alignak_cfg.hosts.items: print("- %s" % (alignak_cfg.hosts.items[host])) res = serialize(alignak_cfg.hosts, no_json=True, printing=False) print("Serialized hosts: %s" % res) # pprint(res) # Un-serialize when received by a daemon result = unserialize(res, 
printing=False) print("Unserialized: %s" % result) assert len(result.templates) == 9 for uuid in result.templates: host = result.templates[uuid] print("- %s" % host) assert isinstance(host, Host) assert host.__class__.my_type == "host" assert host.is_a_template() is True assert host.get_name() in ['generic-host', 'test-host', 'passive-host', 'no-importance', 'qualification', 'normal', 'important', 'production', 'top-for-business'] assert len(result.items) == 48 for uuid in result.items: host = result.items[uuid] print("- %s" % host) assert isinstance(host, Host) assert host.__class__.my_type == "host" assert host.is_a_template() is False # assert host.get_name() in ['guest', 'admin'] # Serialization and hashing s_conf_part = serialize(alignak_cfg) # pprint(s_conf_part) # Update, remove this # try: # s_conf_part = s_conf_part.encode('utf-8') # except UnicodeDecodeError: # pass # Not a JSON object but a dict! # data = json.loads(s_conf_part) data = s_conf_part assert '__sys_python_module__' in data assert data['__sys_python_module__'] == "alignak.objects.config.Config" assert 'content' in data assert isinstance(data['content'], dict) # print("Serialization content: ") # pprint(data['content']) for prop in ['host_perfdata_command', 'service_perfdata_command', 'host_perfdata_file_processing_command', 'service_perfdata_file_processing_command', 'global_host_event_handler', 'global_service_event_handler']: assert prop in data['content'] # If a command is set, then: # assert '__sys_python_module__' in data['content'][prop] # assert data['content'][prop]['__sys_python_module__'] == "alignak.commandcall.CommandCall" # but the default configuration used in this test do not define any command! 
# Now all objects exist in the attributes print("After objects unserialization:") for _, TheItems, category, _, _ in list(alignak_cfg.types_creations.values()): print("- %s" % category) if category in ['arbiters', 'schedulers', 'brokers', 'pollers', 'reactionners', 'receivers']: continue assert category in data['content'] # pprint(data['content'][category]) objects = unserialize(data['content'][category], printing=False) # pprint(objects) assert isinstance(objects, TheItems) print("- %s %s (saved: %s)" % (len(objects) if objects else 'no', category, getattr(self, 'len_' + category))) # assert 'items' in objects # assert 'templates' in objects # # Store and print the items length assert len(objects) == getattr(self, 'len_' + category) # print("- %s %s" % (len(objects) if objects else 'no', category)) # Create a Config from unserialization (no file parsing) new_cfg = Config(data['content'], parsing=False)
class TestConfigClassBase(AlignakTest): ''' This class tests the Config object initialization ''' def setUp(self): pass def test_config_ok(self): '''Test the object initialization and base features''' pass def test_config_serialization(self): '''Test the object initialization and base features''' pass
4
3
132
17
67
48
11
0.73
1
7
3
0
3
0
3
58
402
53
202
32
198
147
167
32
163
25
2
3
33
3,926
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/worker.py
alignak.worker.Worker
class Worker(object): # pylint: disable=too-many-instance-attributes """This class is used for poller and reactionner to work. The worker is a process launch by theses process and read Message in a Queue (self.actions_queue) They launch the Check and then send the result in the Queue self.m (master) they can die if they do not do anything (param timeout) """ # Auto generated identifiers _worker_ids = {} uuid = '' # None _process = None _idletime = None _timeout = None # pylint: disable=too-many-arguments def __init__(self, module_name, actions_queue, returns_queue, processes_by_worker, timeout=300, max_plugins_output_length=8192, target=None, loaded_into='unknown'): """ :param module_name: :param actions_queue: :param returns_queue: :param processes_by_worker: number of processes by worker :type processes_by_worker: int :param timeout: :type timeout: int :param max_plugins_output_length: max output lenght :type max_plugins_output_length: int :param target: :param loaded_into: """ # Set our own identifier cls = self.__class__ self.module_name = module_name if module_name not in cls._worker_ids: cls._worker_ids[module_name] = 1 self._id = '%s_%d' % (module_name, cls._worker_ids[module_name]) cls._worker_ids[module_name] += 1 # Update the logger with the worker identifier global logger # pylint: disable=invalid-name, global-statement logger = logging.getLogger(__name__ + '.' + self._id) # pylint: disable=invalid-name self.checks = [] self.t_each_loop = time.time() self._idletime = 0 self.actions_got = 0 self.actions_launched = 0 self.actions_finished = 0 self.interrupted = False self._timeout = timeout self.processes_by_worker = processes_by_worker # self.control_queue = Queue() # Private Control queue for the Worker self.control_queue = None self.max_plugins_output_length = max_plugins_output_length self.i_am_dying = False # Keep a trace where the worker is launched from (poller or reactionner?) 
self.loaded_into = loaded_into # By default, take our own code if target is None: target = self.work self._process = Process(target=self._prework, args=(target, actions_queue, returns_queue, self.control_queue)) logger.debug("[%s] created a new process", self._id) @staticmethod def _prework(real_work, *args): """Do the job... :param real_work: function to execute :param args: arguments :return: """ logger.debug("calling: %s with %s", real_work, args) real_work(*args) def get_module(self): """Accessor to get the worker module name :return: the worker module name :rtype: str """ return self.module_name def get_id(self): """Accessor to get the worker identifier :return: the worker auto-generated identifier :rtype: str """ return self._id def get_pid(self): """Accessor to get the worker process PID :return: the worker PID :rtype: int """ return self._process.pid def start(self): """Start the worker. Wrapper for calling start method of the process attribute :return: None """ self._process.start() def manage_signal(self, sig, frame): # pylint: disable=unused-argument """Manage signals caught by the process but I do not do anything... our master daemon is managing our termination. :param sig: signal caught by daemon :type sig: str :param frame: current stack frame :type frame: :return: None """ logger.info("worker '%s' (pid=%d) received a signal: %s", self._id, os.getpid(), SIGNALS_TO_NAMES_DICT[sig]) # Do not do anything... our master daemon is managing our termination. 
self.interrupted = True def set_exit_handler(self): """Set the signal handler to manage_signal (defined in this class) Only set handlers for signal.SIGTERM, signal.SIGINT, signal.SIGUSR1, signal.SIGUSR2 :return: None """ signal.signal(signal.SIGINT, self.manage_signal) signal.signal(signal.SIGTERM, self.manage_signal) signal.signal(signal.SIGHUP, self.manage_signal) signal.signal(signal.SIGQUIT, self.manage_signal) def terminate(self): """Wrapper for calling terminate method of the process attribute Also close queues (input and output) and terminate queues thread :return: None """ # We can just terminate process, not threads self._process.terminate() # Is we are with a Manager() way # there should be not such functions # todo: what is this??? # if hasattr(self.actions_queue, 'close'): # self.actions_queue.close() # self.actions_queue.join_thread() def join(self, timeout=None): """Wrapper for calling join method of the process attribute :param timeout: time to wait for the process to terminate :type timeout: int :return: None """ self._process.join(timeout) def is_alive(self): """Wrapper for calling is_alive method of the process attribute :return: A boolean indicating if the process is alive :rtype: bool """ return self._process.is_alive() def get_new_checks(self, queue, return_queue): """Get new checks if less than nb_checks_max If no new checks got and no check in queue, sleep for 1 sec REF: doc/alignak-action-queues.png (3) :return: None """ try: logger.debug("get_new_checks: %s / %s", len(self.checks), self.processes_by_worker) while len(self.checks) < self.processes_by_worker: msg = queue.get_nowait() if msg is None: time.sleep(0.01) continue logger.debug("Got a message: %s", msg) if msg.get_type() == 'Do': logger.debug("Got an action: %s", msg.get_data()) self.checks.append(msg.get_data()) self.actions_got += 1 elif msg.get_type() == 'ping': msg = Message(_type='pong', data='pong!', source=self._id) logger.debug("Queuing message: %s", msg) 
return_queue.put_nowait(msg) logger.debug("Queued") else: logger.warning("Ignoring message of type: %s", msg.get_type()) except Full: logger.warning("Actions queue is full") except Empty: logger.debug("Actions queue is empty") if not self.checks: self._idletime += 1 # Maybe the Queue() has been deleted by our master ? except (IOError, EOFError) as exp: logger.warning("My actions queue is no more available: %s", str(exp)) self.interrupted = True except Exception as exp: # pylint: disable=broad-except logger.error("Failed getting messages in actions queue: %s", str(exp)) logger.debug("get_new_checks exit") def launch_new_checks(self): """Launch checks that are in status REF: doc/alignak-action-queues.png (4) :return: None """ # queue for chk in self.checks: if chk.status not in [ACT_STATUS_QUEUED]: continue logger.debug("Launch check: %s", chk.uuid) self._idletime = 0 self.actions_launched += 1 process = chk.execute() # Maybe we got a true big problem in the action launching if process == 'toomanyopenfiles': # We should die as soon as we return all checks logger.error("I am dying because of too many open files: %s", chk) self.i_am_dying = True else: if not isinstance(process, string_types): logger.debug("Launched check: %s, pid=%d", chk.uuid, process.pid) def manage_finished_checks(self, queue): """Check the status of checks if done, return message finished :) REF: doc/alignak-action-queues.png (5) :return: None """ to_del = [] wait_time = 1.0 now = time.time() logger.debug("--- manage finished checks") for action in self.checks: logger.debug("--- checking: last poll: %s, now: %s, wait_time: %s, action: %s", action.last_poll, now, action.wait_time, action) if action.status == ACT_STATUS_LAUNCHED and action.last_poll < now - action.wait_time: action.check_finished(self.max_plugins_output_length) wait_time = min(wait_time, action.wait_time) # If action done, we can launch a new one if action.status in [ACT_STATUS_DONE, ACT_STATUS_TIMEOUT]: logger.debug("--- check 
done/timeout: %s", action.uuid) self.actions_finished += 1 to_del.append(action) # We answer to our master try: msg = Message(_type='Done', data=action, source=self._id) logger.debug("Queuing message: %s", msg) queue.put_nowait(msg) except Exception as exp: # pylint: disable=broad-except logger.error("Failed putting messages in returns queue: %s", str(exp)) for chk in to_del: logger.debug("--- delete check: %s", chk.uuid) self.checks.remove(chk) # Little sleep logger.debug("--- manage finished checks terminated, I will wait: %s", wait_time) time.sleep(wait_time) def check_for_system_time_change(self): # pragma: no cover, hardly testable with unit tests... """Check if our system time change. If so, change our :return: 0 if the difference < 900, difference else :rtype: int """ now = time.time() difference = now - self.t_each_loop # Now set the new value for the tick loop self.t_each_loop = now # If we have more than 15 min time change, we need to compensate it # todo: confirm that 15 minutes is a good choice... if abs(difference) > 900: # pragma: no cover, not with unit tests... 
return difference return 0 def work(self, actions_queue, returns_queue, control_queue=None): # pragma: no cover """Wrapper function for do_work in order to catch the exception to see the real work, look at do_work :param actions_queue: Global Queue Master->Slave :type actions_queue: Queue.Queue :param returns_queue: queue managed by manager :type returns_queue: Queue.Queue :return: None """ try: logger.info("[%s] (pid=%d) starting my job...", self._id, os.getpid()) self.do_work(actions_queue, returns_queue, control_queue) logger.info("[%s] (pid=%d) stopped", self._id, os.getpid()) except ActionError as exp: logger.error("[%s] exited with an ActionError exception : %s", self._id, str(exp)) logger.debug(exp) raise # Catch any exception, log the exception and exit anyway except Exception as exp: # pragma: no cover, this should never happen indeed ;) logger.error("[%s] exited with an unmanaged exception : %s", self._id, str(exp)) logger.exception(exp) raise def do_work(self, actions_queue, returns_queue, control_queue=None): # pragma: no cover """Main function of the worker. * Get checks * Launch new checks * Manage finished checks :param actions_queue: Global Queue Master->Slave :type actions_queue: Queue.Queue :param returns_queue: queue managed by manager :type returns_queue: Queue.Queue :return: None """ # restore default signal handler for the workers: # signal.signal(signal.SIGTERM, signal.SIG_DFL) self.interrupted = False self.set_exit_handler() setproctitle("alignak-%s worker %s" % (self.loaded_into, self._id)) timeout = 1.0 self.checks = [] self.t_each_loop = time.time() while True: begin = time.time() logger.debug("--- loop begin: %s", begin) # If we are dying (big problem!) 
we do not # take new jobs, we just finished the current one if not self.i_am_dying: # REF: doc/alignak-action-queues.png (3) self.get_new_checks(actions_queue, returns_queue) # REF: doc/alignak-action-queues.png (4) self.launch_new_checks() # REF: doc/alignak-action-queues.png (5) self.manage_finished_checks(returns_queue) logger.debug("loop middle, %d checks", len(self.checks)) # Now get order from master, if any... if control_queue: try: control_message = control_queue.get_nowait() logger.info("[%s] Got a message: %s", self._id, control_message) if control_message and control_message.get_type() == 'Die': logger.info("[%s] The master said we must die... :(", self._id) break except Full: logger.warning("Worker control queue is full") except Empty: pass except Exception as exp: # pylint: disable=broad-except logger.error("Exception when getting master orders: %s. ", str(exp)) # Maybe someone asked us to die, if so, do it :) if self.interrupted: logger.info("I die because someone asked ;)") break # Look if we are dying, and if we finish all current checks # if so, we really die, our master poller will launch a new # worker because we were too weak to manage our job :( if not self.checks and self.i_am_dying: logger.warning("I die because I cannot do my job as I should " "(too many open files?)... forgive me please.") break # Manage a possible time change (our avant will be change with the diff) diff = self.check_for_system_time_change() begin += diff logger.debug("loop check timechange: %s", diff) timeout -= time.time() - begin if timeout < 0: timeout = 1.0 else: time.sleep(0.1) logger.debug("+++ loop end: timeout = %s, idle: %s, checks: %d, " "actions (got: %d, launched: %d, finished: %d)", timeout, self._idletime, len(self.checks), self.actions_got, self.actions_launched, self.actions_finished)
class Worker(object): '''This class is used for poller and reactionner to work. The worker is a process launch by theses process and read Message in a Queue (self.actions_queue) They launch the Check and then send the result in the Queue self.m (master) they can die if they do not do anything (param timeout) ''' def __init__(self, module_name, actions_queue, returns_queue, processes_by_worker, timeout=300, max_plugins_output_length=8192, target=None, loaded_into='unknown'): ''' :param module_name: :param actions_queue: :param returns_queue: :param processes_by_worker: number of processes by worker :type processes_by_worker: int :param timeout: :type timeout: int :param max_plugins_output_length: max output lenght :type max_plugins_output_length: int :param target: :param loaded_into: ''' pass @staticmethod def _prework(real_work, *args): '''Do the job... :param real_work: function to execute :param args: arguments :return: ''' pass def get_module(self): '''Accessor to get the worker module name :return: the worker module name :rtype: str ''' pass def get_id(self): '''Accessor to get the worker identifier :return: the worker auto-generated identifier :rtype: str ''' pass def get_pid(self): '''Accessor to get the worker process PID :return: the worker PID :rtype: int ''' pass def start(self): '''Start the worker. Wrapper for calling start method of the process attribute :return: None ''' pass def manage_signal(self, sig, frame): '''Manage signals caught by the process but I do not do anything... our master daemon is managing our termination. 
:param sig: signal caught by daemon :type sig: str :param frame: current stack frame :type frame: :return: None ''' pass def set_exit_handler(self): '''Set the signal handler to manage_signal (defined in this class) Only set handlers for signal.SIGTERM, signal.SIGINT, signal.SIGUSR1, signal.SIGUSR2 :return: None ''' pass def terminate(self): '''Wrapper for calling terminate method of the process attribute Also close queues (input and output) and terminate queues thread :return: None ''' pass def join(self, timeout=None): '''Wrapper for calling join method of the process attribute :param timeout: time to wait for the process to terminate :type timeout: int :return: None ''' pass def is_alive(self): '''Wrapper for calling is_alive method of the process attribute :return: A boolean indicating if the process is alive :rtype: bool ''' pass def get_new_checks(self, queue, return_queue): '''Get new checks if less than nb_checks_max If no new checks got and no check in queue, sleep for 1 sec REF: doc/alignak-action-queues.png (3) :return: None ''' pass def launch_new_checks(self): '''Launch checks that are in status REF: doc/alignak-action-queues.png (4) :return: None ''' pass def manage_finished_checks(self, queue): '''Check the status of checks if done, return message finished :) REF: doc/alignak-action-queues.png (5) :return: None ''' pass def check_for_system_time_change(self): '''Check if our system time change. If so, change our :return: 0 if the difference < 900, difference else :rtype: int ''' pass def work(self, actions_queue, returns_queue, control_queue=None): '''Wrapper function for do_work in order to catch the exception to see the real work, look at do_work :param actions_queue: Global Queue Master->Slave :type actions_queue: Queue.Queue :param returns_queue: queue managed by manager :type returns_queue: Queue.Queue :return: None ''' pass def do_work(self, actions_queue, returns_queue, control_queue=None): '''Main function of the worker. 
* Get checks * Launch new checks * Manage finished checks :param actions_queue: Global Queue Master->Slave :type actions_queue: Queue.Queue :param returns_queue: queue managed by manager :type returns_queue: Queue.Queue :return: None ''' pass
19
18
21
2
11
8
3
0.76
1
6
2
0
16
13
17
17
397
57
201
60
179
152
187
53
168
11
1
4
50
3,927
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests_integ/test_launch_arbiter.py
tests_integ.test_launch_arbiter.TestLaunchArbiter
class TestLaunchArbiter(AlignakTest): def setUp(self): super(TestLaunchArbiter, self).setUp() # Set an environment variable to change the default period of activity log (every 60 loops) os.environ['ALIGNAK_LOG_ACTIVITY'] = '1' self.cfg_folder = '/tmp/alignak' self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder) self.req = requests.Session() def tearDown(self): print("Test terminated!") def _ping_daemons(self, daemon_names=None): # ----- print("Pinging the daemons: %s" % (daemon_names if daemon_names else 'All')) satellite_map = { 'arbiter': '7770', 'scheduler': '7768', 'broker': '7772', 'poller': '7771', 'reactionner': '7769', 'receiver': '7773' } for name, port in list(satellite_map.items()): if daemon_names and name not in daemon_names: continue print("- pinging %s: http://127.0.0.1:%s/identity" % (name, port)) raw_data = self.req.get("http://127.0.0.1:%s/identity" % (port)) assert raw_data.status_code == 200 data = raw_data.json() print("%s, running id: %s" % (name, data['running_id'])) # ----- def _stop_daemons(self, daemon_names=None): # ----- print("Stopping the daemons: %s" % daemon_names) satellite_map = { 'arbiter': '7770', 'scheduler': '7768', 'broker': '7772', 'poller': '7771', 'reactionner': '7769', 'receiver': '7773' } for name, port in list(satellite_map.items()): if daemon_names and name not in daemon_names: continue print("- stopping %s: http://127.0.0.1:%s/stop_request" % (name, port)) raw_data = self.req.get("http://127.0.0.1:%s/stop_request?stop_now=0" % (port)) data = raw_data.json() print("- response = %s" % data) time.sleep(3) for name, port in list(satellite_map.items()): if daemon_names and name not in daemon_names: continue print("- stopping (now) %s: http://127.0.0.1:%s/stop_request" % (name, port)) raw_data = self.req.get("http://127.0.0.1:%s/stop_request?stop_now=1" % (port)) data = raw_data.json() print("- response = %s" % data) # ----- def test_arbiter_no_daemons(self): """ Run the Alignak Arbiter - all the expected 
daemons are missing and are not launched :return: """ self._run_arbiter_no_configured_daemons(False) def test_arbiter_no_daemons_launch(self): """ Run the Alignak Arbiter - all the expected daemons are missing and are launched :return: """ self._run_arbiter_no_configured_daemons(True) def _run_arbiter_no_configured_daemons(self, alignak_launched): """ Run the Alignak Arbiter - all the expected daemons are missing If alignak_launched, the arbiter will launch the missing daemons :return: """ # Copy the default Alignak shipped configuration to the run directory cfg_folder = '/tmp/alignak' print("Copy default configuration (../etc) to %s..." % cfg_folder) if os.path.exists('%s/etc' % cfg_folder): print("deleting existing configuration in %s/etc..." % cfg_folder) shutil.rmtree('%s/etc' % cfg_folder) shutil.copytree(os.path.join(self._test_dir, '../etc'), '%s/etc' % cfg_folder) # Remove the daemons configuration part! shutil.rmtree('%s/etc/alignak.d' % cfg_folder) if os.path.exists('%s/arbiter-master.log' % os.getcwd()): os.remove('%s/arbiter-master.log' % os.getcwd()) try: cfg = configparser.ConfigParser() cfg.read(['%s/etc/alignak.ini' % cfg_folder]) # Arbiter launches the daemons if alignak_launched: cfg.set('alignak-configuration', 'launch_missing_daemons', '1') else: cfg.set('alignak-configuration', 'launch_missing_daemons', '0') with open('%s/etc/alignak.ini' % cfg_folder, "w") as modified: cfg.write(modified) except Exception as exp: print("* parsing error in config file: %s" % exp) assert False self._run_alignak_daemons(cfg_folder=cfg_folder, arbiter_only=True, runtime=30) # The arbiter will have stopped! 
ret = self.procs['arbiter-master'].poll() if ret is None: print("*** Arbiter is still running.") # Stop the arbiter self.procs['arbiter-master'].kill() # raw_data = self.req.get("http://127.0.0.1:7770/stop_request?stop_now=1") # data = raw_data.json() # print("Stop response: %s") time.sleep(2) else: print("*** Arbiter exited with: %s" % ret) assert ret == 4 expected_warnings = [ "No defined configuration for the daemon: arbiter-master.", u"- ignoring repeated file: /tmp/alignak/etc/arbiter/packs/resource.d/readme.cfg", u"Configuration warnings:", # u"the parameter $DIST_BIN$ is ambiguous! No value after =, assuming an empty string", u"No Nagios-like legacy configuration files configured.", u"If you need some, edit the 'alignak.ini' configuration file " u"to declare one or more 'cfg=' variables.", u"There is no arbiter, I add myself (arbiter-master) reachable on 127.0.0.1:7770", u"No realms defined, I am adding one as All", u"No scheduler defined, I am adding one on 127.0.0.1:10000", u"No reactionner defined, I am adding one on 127.0.0.1:10001", u"No poller defined, I am adding one on 127.0.0.1:10002", u"No broker defined, I am adding one on 127.0.0.1:10003", u"No receiver defined, I am adding one on 127.0.0.1:10004", ] if not alignak_launched: expected_warnings.extend([ u"A daemon (reactionner/Default-Reactionner) " u"that we must be related with cannot be connected: ", u"Setting the satellite Default-Reactionner as dead :(", u"Default-Reactionner is not alive for get_running_id", u"A daemon (poller/Default-Poller) " u"that we must be related with cannot be connected: ", u"Setting the satellite Default-Poller as dead :(", u"Default-Poller is not alive for get_running_id", u"A daemon (broker/Default-Broker) " u"that we must be related with cannot be connected: ", u"Setting the satellite Default-Broker as dead :(", u"Default-Broker is not alive for get_running_id", u"A daemon (receiver/Default-Receiver) " u"that we must be related with cannot be connected: ", 
u"Setting the satellite Default-Receiver as dead :(", u"Default-Receiver is not alive for get_running_id", u"A daemon (scheduler/Default-Scheduler) " u"that we must be related with cannot be connected: ", u"Setting the satellite Default-Scheduler as dead :(", u"Default-Scheduler is not alive for get_running_id", u"satellites connection #1 is not correct; " u"let's give another chance after 1 seconds...", u"satellites connection #2 is not correct; " u"let's give another chance after 1 seconds...", u"satellites connection #3 is not correct; " u"let's give another chance after 1 seconds...", ]) expected_errors = [ ] if not alignak_launched: expected_errors = [ u"All the daemons connections could not be established despite 3 tries! " u"Sorry, I bail out!", u"Sorry, I bail out, exit code: 4" ] if alignak_launched: # This function will only send a SIGTERM to the arbiter daemon # self._stop_daemons(['arbiter']) self._stop_alignak_daemons(arbiter_only=False) all_ok = True with open('%s/arbiter-master.log' % os.getcwd()) as f: for line in f: if 'WARNING:' in line: ok = False # Only some WARNING log are accepted: for l in expected_warnings: if l in line: ok = True break if ok: print("... %s" % line.rstrip()) else: print(">>> %s" % line.rstrip()) all_ok = False # assert ok if 'ERROR:' in line: ok = False # Only some WARNING log are accepted: for l in expected_errors: if l in line: ok = True break if ok: print("... 
%s" % line.rstrip()) else: print("*** %s" % line.rstrip()) all_ok = False assert all_ok def test_arbiter_daemons(self): """ Run the Alignak Arbiter - all the expected daemons are started by the arbiter and then the arbiter exits :return: """ # All the default configuration files are in /tmp/alignak/etc # Update monitoring configuration file variables try: cfg = configparser.ConfigParser() cfg.read(['/tmp/alignak/etc/alignak.ini', '/tmp/alignak/etc/alignak.d/daemons.ini']) cfg.set('alignak-configuration', 'launch_missing_daemons', '1') cfg.set('alignak-configuration', 'polling_interval', '1') cfg.set('alignak-configuration', 'daemons_check_period', '5') cfg.set('alignak-configuration', 'daemons_stop_timeout', '3') cfg.set('alignak-configuration', 'daemons_start_timeout', '1') cfg.set('alignak-configuration', 'daemons_new_conf_timeout', '1') cfg.set('alignak-configuration', 'daemons_dispatch_timeout', '1') cfg.set('daemon.arbiter-master', 'alignak_launched', '1') cfg.set('daemon.scheduler-master', 'alignak_launched', '1') cfg.set('daemon.poller-master', 'alignak_launched', '1') cfg.set('daemon.poller-master', 'min_workers', '1') cfg.set('daemon.poller-master', 'max_workers', '1') cfg.set('daemon.reactionner-master', 'alignak_launched', '1') cfg.set('daemon.receiver-master', 'alignak_launched', '1') cfg.set('daemon.broker-master', 'alignak_launched', '1') with open('/tmp/alignak/etc/alignak.ini', "w") as modified: cfg.write(modified) except Exception as exp: print("* parsing error in config file: %s" % exp) assert False args = [ os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"), "-e", "/tmp/alignak/etc/alignak.ini" ] self.procs = {'arbiter-master': subprocess.Popen(args)} print("%s launched (pid=%d)" % ('arbiter', self.procs['arbiter-master'].pid)) # Sleep some few seconds because of the time needed to start the processes, # poll them and declare as faulty ! sleep(15) # The arbiter will NOT have stopped! 
It is still running ret = self.procs['arbiter-master'].poll() assert ret is None print("Started...") self._ping_daemons() # Sleep some few seconds to let the arbiter ping the daemons by itself sleep(30) self._ping_daemons() # This function will only send a SIGTERM to the arbiter daemon self._stop_daemons(['arbiter']) # self._stop_alignak_daemons(arbiter_only=True) with open('/tmp/alignak/log/arbiter-master.log') as f: for line in f: line = line.strip() print(line) if 'ERROR:' in line: assert False, "Raised an error!" def test_arbiter_daemons_kill_one_daemon(self): """ Run the Alignak Arbiter - all the expected daemons are started by the arbiter and then a daemon is killed ... the arbiter kills all the remaining daemons after a while and then stops :return: """ # All the default configuration files are in /tmp/alignak/etc # Update monitoring configuration file variables try: cfg = configparser.ConfigParser() cfg.read(['/tmp/alignak/etc/alignak.ini', '/tmp/alignak/etc/alignak.d/daemons.ini']) cfg.set('alignak-configuration', 'launch_missing_daemons', '1') cfg.set('alignak-configuration', 'polling_interval', '1') cfg.set('alignak-configuration', 'daemons_check_period', '3') cfg.set('alignak-configuration', 'daemons_stop_timeout', '1') cfg.set('alignak-configuration', 'daemons_start_timeout', '3') cfg.set('alignak-configuration', 'daemons_new_conf_timeout', '1') cfg.set('alignak-configuration', 'daemons_dispatch_timeout', '1') cfg.set('daemon.arbiter-master', 'alignak_launched', '1') cfg.set('daemon.scheduler-master', 'alignak_launched', '1') cfg.set('daemon.poller-master', 'alignak_launched', '1') cfg.set('daemon.poller-master', 'min_workers', '1') cfg.set('daemon.poller-master', 'max_workers', '1') cfg.set('daemon.reactionner-master', 'alignak_launched', '1') cfg.set('daemon.receiver-master', 'alignak_launched', '1') cfg.set('daemon.broker-master', 'alignak_launched', '1') with open('/tmp/alignak/etc/alignak.ini', "w") as modified: cfg.write(modified) 
os.remove('/tmp/alignak/etc/alignak.d/daemons.ini') except Exception as exp: print("* parsing error in config file: %s" % exp) assert False args = [ os.path.join(self._test_dir, "../alignak/bin/alignak_arbiter.py"), "-e", "/tmp/alignak/etc/alignak.ini" ] self.procs = {'arbiter-master': subprocess.Popen(args)} print("%s launched (pid=%d)" % ('arbiter', self.procs['arbiter-master'].pid)) # Sleep some few seconds because of the time needed to start the processes, # poll them and declare as faulty ! sleep(30) # The arbiter will NOT have stopped! It is still running ret = self.procs['arbiter-master'].poll() assert ret is None print("Started...") self._ping_daemons() sleep(1) print("Killing one daemon process...") self._stop_daemons(['receiver']) # sleep(1) # self._ping_daemons() # sleep(2) sleep(30) self._stop_daemons(['arbiter']) all_ok = 0 with open('/tmp/alignak/log/arbiter-master.log') as f: for line in f: if 'WARNING:' in line: if "A daemon (receiver/receiver-master) that we must be " \ "related with cannot be connected" in line: all_ok = all_ok + 1 print("... %s" % line.rstrip()) if "Setting the satellite receiver-master as dead :(" in line: all_ok = all_ok + 1 print("... %s" % line.rstrip()) if "Dispatcher, these daemons are not configured: receiver-master, and a " \ "configuration has yet been dispatched dispatch, " \ "a new dispatch is required..." in line: all_ok = all_ok + 1 print("... %s" % line.rstrip()) assert all_ok >= 3
class TestLaunchArbiter(AlignakTest): def setUp(self): pass def tearDown(self): pass def _ping_daemons(self, daemon_names=None): pass def _stop_daemons(self, daemon_names=None): pass def test_arbiter_no_daemons(self): ''' Run the Alignak Arbiter - all the expected daemons are missing and are not launched :return: ''' pass def test_arbiter_no_daemons_launch(self): ''' Run the Alignak Arbiter - all the expected daemons are missing and are launched :return: ''' pass def _run_arbiter_no_configured_daemons(self, alignak_launched): ''' Run the Alignak Arbiter - all the expected daemons are missing If alignak_launched, the arbiter will launch the missing daemons :return: ''' pass def test_arbiter_daemons(self): ''' Run the Alignak Arbiter - all the expected daemons are started by the arbiter and then the arbiter exits :return: ''' pass def test_arbiter_daemons_kill_one_daemon(self): ''' Run the Alignak Arbiter - all the expected daemons are started by the arbiter and then a daemon is killed ... the arbiter kills all the remaining daemons after a while and then stops :return: ''' pass
10
5
40
5
29
6
5
0.22
1
6
0
0
9
3
9
64
370
53
263
48
253
57
196
39
186
18
2
5
42
3,928
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests_integ/test_launch_daemons_realms_and_checks.py
tests_integ.test_launch_daemons_realms_and_checks.TestLaunchDaemonsRealms
class TestLaunchDaemonsRealms(AlignakTest): def setUp(self): super(TestLaunchDaemonsRealms, self).setUp() # Set an environment variable to activate the logging of checks execution # With this the pollers/schedulers will raise INFO logs about the checks execution os.environ['ALIGNAK_LOG_ACTIONS'] = 'WARNING' # Change default daemonisation behavior: do not preserve file descriptors for stdin/stdout # Tricky configuration ! # os.environ['ALIGNAK_DO_NOT_PRESERVE_STDOUT'] = '1' # Set an environment variable to change the default period of activity log (every 60 loops) os.environ['ALIGNAK_LOG_ACTIVITY'] = '60' # Alignak daemons monitoring everay 3 seconds os.environ['ALIGNAK_DAEMON_MONITORING'] = '3' # Alignak arbiter self-monitoring - report statistics every 5 loop counts os.environ['ALIGNAK_SYSTEM_MONITORING'] = '5' # Log daemons loop turn os.environ['ALIGNAK_LOG_LOOP'] = 'INFO' def tearDown(self): print("Test terminated!") def test_checks_active_satellites(self): """ Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active mode :return: None """ self._run_checks(passive=False, hosts_count=10, duration=240, cfg_dir='default_many_hosts') def test_checks_active_satellites_daemons(self): """ Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active mode Satellites are started as daemons :return: None """ self._run_checks(passive=False, hosts_count=10, duration=240, cfg_dir='default_many_hosts', daemonize=True) def test_checks_active_satellites_multi_realms(self): """ Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active mode Several realms (All, North and South) with 10 hosts in each realm :return: None """ self._run_checks(passive=False, hosts_count=10, duration=120, cfg_dir='default_realms', more_daemons = { 'broker-North': { 'type': 'broker', 'name': 'broker-North', 'port': 
'40001', 'realm': 'North' }, 'broker-South': { 'type': 'broker', 'name': 'broker-South', 'port': '40002', 'realm': 'South' }, 'scheduler-North': { 'type': 'scheduler', 'name': 'scheduler-North', 'port': '20001', 'realm': 'North' }, 'scheduler-South': { 'type': 'scheduler', 'name': 'scheduler-South', 'port': '20002', 'realm': 'South' }, 'poller-North': { 'type': 'poller', 'name': 'poller-North', 'port': '30001', 'realm': 'North' }, 'poller-South': { 'type': 'poller', 'name': 'poller-South', 'port': '30002', 'realm': 'South' }, }, realms = ['All', 'North', 'South']) def test_checks_passive_satellites(self): """ Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in passive mode The scheduler pushes the actions to execute to pollers / reactionners and get the results from the pollers/reactionners :return: None """ self._run_checks(passive=True, hosts_count=10, duration=120, cfg_dir='default_many_hosts') def test_checks_passive_satellites_multi_realms(self): """ Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in passive mode The scheduler pushes the actions to execute to pollers / reactionners and get the results from the pollers/reactionners Several realms (All, North and South) with 10 hosts in each realm :return: None """ self._run_checks(passive=True, hosts_count=10, duration=120, cfg_dir='default_realms', more_daemons = { 'broker-North': { 'type': 'broker', 'name': 'broker-North', 'port': '40001', 'realm': 'North' }, 'broker-South': { 'type': 'broker', 'name': 'broker-South', 'port': '40002', 'realm': 'South' }, 'scheduler-North': { 'type': 'scheduler', 'name': 'scheduler-North', 'port': '20001', 'realm': 'North' }, 'scheduler-South': { 'type': 'scheduler', 'name': 'scheduler-South', 'port': '20002', 'realm': 'South' }, 'poller-North': { 'type': 'poller', 'name': 'poller-North', 'port': '30001', 'realm': 'North' }, 'poller-South': { 'type': 
'poller', 'name': 'poller-South', 'port': '30002', 'realm': 'South' }, }, realms = ['All', 'North', 'South']) def _run_checks(self, passive=True, duration=60, hosts_count=10, cfg_dir='default_many_hosts', more_daemons=None, realms=None, daemonize=False): """ Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active or passive mode :return: None """ self.cfg_folder = '/tmp/alignak' daemons_list = ['broker-master', 'poller-master', 'reactionner-master', 'receiver-master', 'scheduler-master'] if realms is None: realms = ['All'] if more_daemons is not None: daemons_list += more_daemons.keys() print("Daemons: %s" % daemons_list) # Default shipped configuration preparation self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder) # Specific daemon load configuration preparation if os.path.exists(os.path.join(self._test_dir, './cfg/%s/alignak.cfg' % cfg_dir)): shutil.copy(os.path.join(self._test_dir, './cfg/%s/alignak.cfg' % cfg_dir), '%s/etc' % self.cfg_folder) if os.path.exists('%s/etc/arbiter' % self.cfg_folder): shutil.rmtree('%s/etc/arbiter' % self.cfg_folder) shutil.copytree(os.path.join(self._test_dir, './cfg/%s/arbiter' % cfg_dir), '%s/etc/arbiter' % self.cfg_folder) self._prepare_hosts_configuration(cfg_folder='%s/etc/arbiter/objects/hosts' % self.cfg_folder, hosts_count=hosts_count, target_file_name='hosts.cfg', realms=realms) # Some script commands must be copied in the test folder if os.path.exists(os.path.join(self._test_dir, './libexec/check_command.sh')): shutil.copy(os.path.join(self._test_dir, './libexec/check_command.sh'), '%s/check_command.sh' % self.cfg_folder) # Update the default configuration files files = ['%s/etc/alignak.ini' % self.cfg_folder] try: cfg = configparser.ConfigParser() cfg.read(files) cfg.set('alignak-configuration', 'tick_manage_internal_checks', '10') cfg.set('alignak-configuration', 'launch_missing_daemons', '1') cfg.set('alignak-configuration', 
'daemons_start_timeout', '15') cfg.set('alignak-configuration', 'daemons_dispatch_timeout', '15') # A macro for the check script directory cfg.set('alignak-configuration', '_EXEC_DIR', self.cfg_folder) for daemon in daemons_list: if more_daemons and daemon in more_daemons: if not cfg.has_section('daemon.%s' % daemon): cfg.add_section('daemon.%s' % daemon) cfg.set('daemon.%s' % daemon, 'type', more_daemons[daemon]['type']) cfg.set('daemon.%s' % daemon, 'name', more_daemons[daemon]['name']) cfg.set('daemon.%s' % daemon, 'realm', more_daemons[daemon]['realm']) cfg.set('daemon.%s' % daemon, 'port', more_daemons[daemon]['port']) cfg.set('daemon.%s' % daemon, 'alignak_launched', '1') if cfg.has_section('daemon.%s' % daemon): cfg.set('daemon.%s' % daemon, 'alignak_launched', '1') # cfg.set('daemon.%s' % daemon, 'debug', '1') if daemonize: cfg.set('daemon.%s' % daemon, 'is_daemon', '1') if passive and 'poller' in daemon: cfg.set('daemon.%s' % daemon, 'passive', '1') if passive and 'reactionner' in daemon: cfg.set('daemon.%s' % daemon, 'passive', '1') with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified: cfg.write(modified) except Exception as exp: print("* parsing error in config file: %s" % exp) assert False run = True if run: # Run daemons for the required duration self._run_alignak_daemons(cfg_folder='/tmp/alignak', daemons_list=daemons_list, run_folder='/tmp/alignak', runtime=duration) self._stop_alignak_daemons() # Check daemons log files ignored_warnings = [ # Sometimes, daemons comunication problem happen :( u"Server not available:", u"Setting the satellite ", u"is not alive for", u"let's give another chance after 5 seconds...", # Configuration check u"Configuration warnings", # u"the parameter $DIST_BIN$ is ambiguous! 
No value after =, assuming an empty string", u"No realms defined, I am adding one as All", u"[host::localhost2] has no defined check command", u"hosts configuration warnings: 1, total: 2", u"[host::localhost2] has no defined check command", u"inner retention module is loaded but is not enabled", "Retention directory created", # Daemons not existing u"Some hosts exist in the realm ", u"Adding a scheduler", u"Added a scheduler", u"Adding a poller", u"Added a poller", u"Adding a broker", u"Added a broker", u"Adding a reactionner", u"Added a reactionner", u"Adding a receiver", u"Added a receiver", u"hostgroup allhosts", # Configuration dispatching u"The arbiter pushed a new configuration...", # If some actions are not reported as executed by a reactionner or poller, ignore the warning message ! u"actions never came back for the satellite", # Action execution log u'Timeout raised for ', u'spent too much time:', u'Launch command', u'Check result', u'Performance data', u'Action', u'Got check result', u'Echo the current state', u'Set host', u'Host localhost', u'Check to run:' ] ignored_errors = [ # 'Error on backend login: ', # 'Configured user account is not allowed for this module' 'Trying to add actions from an unknown scheduler' ] (errors_raised, warnings_raised) = \ self._check_daemons_log_for_errors(daemons_list, run_folder='/tmp/alignak', ignored_warnings=ignored_warnings, ignored_errors=ignored_errors, dump_all=False) assert errors_raised == 0, "Error logs raised!" print("No unexpected error logs raised by the daemons") assert warnings_raised == 0, "Warning logs raised!" 
print("No unexpected warning logs raised by the daemons") # Expected logs from the daemons expected_logs = { 'poller-master': [ # Check launch "Launch command: '%s/check_command.sh " % self.cfg_folder, "Action '%s/check_command.sh " % self.cfg_folder, "Check result for '%s/check_command.sh " % self.cfg_folder, "Performance data for '%s/check_command.sh " % self.cfg_folder, ], 'scheduler-master': [ # Internal host check "Set host localhost as UP (internal check)", ], 'reactionner-master': [ "Launch command: '/usr/bin/printf ", "Action '/usr/bin/printf " ] } service_checks = {} # Store services information for localhost service_checks["localhost"] = { "host-check": {"launch": 0, "run": 0, "exit": 0, "result": 0} } service_checks["localhost2"] = { "host-check": {"launch": 0, "run": 0, "exit": 0, "result": 0} } service_checks["localhost3"] = { "host-check": {"launch": 0, "run": 0, "exit": 0, "result": 0} } for realm in realms: for index in range(hosts_count): # Store services information for each host service_checks["host-%s-%d" % (realm.lower(), index)] = { "host-check": {"launch": 0, "run": 0, "exit": 0, "result": 0} } for service in ["dummy_echo", "dummy_unknown", "dummy_ok", "dummy_warning", "dummy_critical", "dummy_timeout", "extra-1", "extra-2", "extra-3", "extra-4"]: # Store services information for each host service service_checks["host-%s-%d" % (realm.lower(), index)][service] = { "launch": 0, "run": 0, "exit": 0, "result": 0 } # Poller log about the host check and services check if service not in ['dummy_echo']: # No internal check for the poller expected_logs['poller-master'].append("host-%s-%d host-check" % (realm.lower(), index)) expected_logs['poller-master'].append("host-%s-%d %s" % (realm.lower(), index, service)) # Scheduler log about the host check and services check expected_logs['scheduler-master'].append("check_command.sh host-%s-%d %s" % (realm.lower(), index, service)) expected_logs['scheduler-master'].append("Internal check: 
host-%s-%d/dummy_echo" % (realm.lower(), index)) # Reactionner log faulty services check if service in ["dummy_warning", "dummy_critical"]: expected_logs['reactionner-master'].append("host-%s-%d/%s" % (realm.lower(), index, service)) errors_raised = 0 scheduler_count = 0 poller_count = 0 travis_run = 'TRAVIS' in os.environ # Poller log: # run = "Launch command: '/tmp/check_command.sh host-1 dummy_critical 2'" # get = "Check result for '/tmp/check_command.sh host-1 dummy_critical 2': 2, Hi, checking host-1/dummy_critical -> exit=2" # Scheduler log # launch = "Check to run: Check 3d5bd56a-fb91-4e83-b54c-c103b4a00fd5 active, item: b2b25d87-b8f1-439c-bb35-986b229eced4, status: in_poller, command:'/tmp/check_command.sh host-9 dummy_critical 2'" # result = "Got check result: 2 for host-4/dummy_critical" for daemon in daemons_list: if not daemon.startswith('scheduler'): continue assert os.path.exists('/tmp/alignak/log/%s.log' % daemon), '/tmp/alignak/log/%s.log does not exist!' % daemon print(("-----\n%s log file\n" % daemon)) with open('/tmp/alignak/log/%s.log' % daemon) as f: lines = f.readlines() logs = [] for line in lines: # Catches WARNING logs if 'WARNING:' in line: # Catch warning for actions execution line = line.split('WARNING: ') line = line[1] line = line.strip() line = line.split('] ') try: line = line[1] line = line.strip() if not travis_run: print(("-ok-: %s" % line)) except IndexError: if not travis_run: print("***line: %s" % line) launch_search = re.search("command:'/tmp/alignak/check_command.sh ([A-Za-z0-9-_]+) ([A-Za-z0-9-_]+)", line, re.IGNORECASE) if launch_search: host = launch_search.group(1) service = launch_search.group(2) print("Service check launch: %s / %s" % (host, service)) service_checks[host][service]['launch'] += 1 result_search = re.search("Got check result: (.) 
for ([A-Za-z0-9-_]+)/([A-Za-z0-9-_]+)", line, re.IGNORECASE) if result_search: host = result_search.group(2) service = result_search.group(3) exit_code = result_search.group(1) print("Service check result: %s / %s - %s" % (host, service, exit_code)) service_checks[host][service]['result'] += 1 else: result_search = re.search("Got check result: (.) for ([A-Za-z0-9-_]+)$", line, re.IGNORECASE) if result_search: host = result_search.group(2) service = "host-check" exit_code = result_search.group(1) print("Service check result: %s / %s - %s" % (host, service, exit_code)) service_checks[host][service]['result'] += 1 for daemon in daemons_list: if not daemon.startswith('poller'): continue assert os.path.exists('/tmp/alignak/log/%s.log' % daemon), '/tmp/alignak/log/%s.log does not exist!' % daemon print(("-----\n%s log file\n" % daemon)) with open('/tmp/alignak/log/%s.log' % daemon) as f: lines = f.readlines() logs = [] for line in lines: # Catches WARNING and ERROR logs if 'WARNING:' in line: # Catch warning for actions execution line = line.split('WARNING: ') line = line[1] line = line.strip() line = line.split('] ') try: line = line[1] line = line.strip() # if not travis_run: # print(("-ok-: %s" % line)) except IndexError: if not travis_run: print("***line: %s" % line) run_search = re.search("Launch command: '/tmp/alignak/check_command.sh ([A-Za-z0-9-_]+) ([A-Za-z0-9-_]+)", line, re.IGNORECASE) if run_search: host = run_search.group(1) service = run_search.group(2) print("Service check run: %s / %s" % (host, service)) service_checks[host][service]['run'] += 1 exit_search = re.search("Check result for '/tmp/alignak/check_command.sh ([A-Za-z0-9-_]+) ([A-Za-z0-9-_]+) (.)", line, re.IGNORECASE) if exit_search: host = exit_search.group(1) service = exit_search.group(2) exit_code = exit_search.group(3) print("Service check exit: %s / %s - %s" % (host, service, exit_code)) service_checks[host][service]['exit'] += 1 print("Service checks") for host in sorted(service_checks): 
print("Host: %s" % host) for service in service_checks[host]: svc_counts = service_checks[host][service] print("- %s: %s" % (service, svc_counts)) if svc_counts['launch'] != svc_counts['result']: print("*****") if svc_counts['run'] != svc_counts['exit']: print("*****")
class TestLaunchDaemonsRealms(AlignakTest): def setUp(self): pass def tearDown(self): pass def test_checks_active_satellites(self): ''' Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active mode :return: None ''' pass def test_checks_active_satellites_daemons(self): ''' Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active mode Satellites are started as daemons :return: None ''' pass def test_checks_active_satellites_multi_realms(self): ''' Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active mode Several realms (All, North and South) with 10 hosts in each realm :return: None ''' pass def test_checks_passive_satellites(self): ''' Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in passive mode The scheduler pushes the actions to execute to pollers / reactionners and get the results from the pollers/reactionners :return: None ''' pass def test_checks_passive_satellites_multi_realms(self): ''' Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in passive mode The scheduler pushes the actions to execute to pollers / reactionners and get the results from the pollers/reactionners Several realms (All, North and South) with 10 hosts in each realm :return: None ''' pass def _run_checks(self, passive=True, duration=60, hosts_count=10, cfg_dir='default_many_hosts', more_daemons=None, realms=None, daemonize=False): ''' Run the Alignak daemons and check the correct checks result and notifications with some pollers / reactionners in active or passive mode :return: None ''' pass
9
6
55
6
39
10
6
0.26
1
5
0
0
8
1
8
63
446
58
309
40
299
79
185
36
176
42
2
6
49
3,929
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/util.py
alignak.util.KeyValueSyntaxError
class KeyValueSyntaxError(ValueError): """Syntax error on a duplicate_foreach value"""
class KeyValueSyntaxError(ValueError): '''Syntax error on a duplicate_foreach value''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
11
2
0
1
1
0
1
1
1
0
0
4
0
0
3,930
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/stats.py
alignak.stats.Stats
class Stats(object): # pylint: disable=too-many-instance-attributes """Stats class to export data into a statsd (or carbon/Graphite) format This class allows to send metrics to a StatsD server using UDP datagrams. Same behavior as:: echo "foo:1|c" | nc -u -w0 127.0.0.1 8125 With the Graphite option, this class stores the metrics in an inner list and flushes the metrics to a Graphite instance when the flush method is called. """ def __init__(self): # Our daemon type and name self.name = '' # This attribute is not used, but I keep ascending compatibility with former interface! self._type = None # Our known statistics self.stats = {} # local statsd part self.statsd_host = None self.statsd_port = None self.statsd_prefix = None self.statsd_enabled = None # local broks part self.broks_enabled = None # Statsd daemon parameters self.statsd_sock = None self.statsd_addr = None # Graphite connection self.carbon = None self.my_metrics = [] self.metrics_flush_count = int(os.getenv('ALIGNAK_STATS_FLUSH_COUNT', '256')) self.last_failure = 0 self.metrics_flush_pause = int(os.getenv('ALIGNAK_STATS_FLUSH_PAUSE', '10')) self.log_metrics_flush_pause = False # File part self.stats_file = None self.file_d = None if 'ALIGNAK_STATS_FILE' in os.environ: self.stats_file = os.environ['ALIGNAK_STATS_FILE'] self.line_fmt = '[#date#] #counter# #value# #uom#\n' if 'ALIGNAK_STATS_FILE_LINE_FMT' in os.environ: self.line_fmt = os.environ['ALIGNAK_STATS_FILE_LINE_FMT'] self.date_fmt = '%Y-%m-%d %H:%M:%S' if 'ALIGNAK_STATS_FILE_DATE_FMT' in os.environ: self.date_fmt = os.environ['ALIGNAK_STATS_FILE_DATE_FMT'] @property def metrics_count(self): """ Number of internal stored metrics :return: """ return len(self.my_metrics) def __repr__(self): # pragma: no cover return '<StatsD report to %r:%r, enabled: %r />' \ % (self.statsd_host, self.statsd_port, self.statsd_enabled) __str__ = __repr__ def register(self, name, _type, statsd_host='localhost', statsd_port=8125, statsd_prefix='alignak', 
statsd_enabled=False, broks_enabled=False): """Init instance with real values :param name: daemon name :type name: str :param _type: daemon type :type _type: :param statsd_host: host to post data :type statsd_host: str :param statsd_port: port to post data :type statsd_port: int :param statsd_prefix: prefix to add to metric :type statsd_prefix: str :param statsd_enabled: bool to enable statsd :type statsd_enabled: bool :param broks_enabled: bool to enable broks sending :type broks_enabled: bool :return: None """ self.name = name # This attribute is not used, but I keep ascending compatibility with former interface! self._type = _type # local statsd part self.statsd_host = statsd_host self.statsd_port = int(statsd_port) self.statsd_prefix = statsd_prefix self.statsd_enabled = statsd_enabled # local broks part self.broks_enabled = broks_enabled logger.debug("StatsD configuration for %s - %s:%s, prefix: %s, " "enabled: %s, broks: %s, file: %s", self.name, self.statsd_host, self.statsd_port, self.statsd_prefix, self.statsd_enabled, self.broks_enabled, self.stats_file) if self.statsd_enabled and self.statsd_host is not None and self.statsd_host != 'None': logger.info("Sending %s statistics to: %s:%s, prefix: %s", self.name, self.statsd_host, self.statsd_port, self.statsd_prefix) if self.load_statsd(): logger.info('Alignak internal statistics are sent to StatsD.') else: logger.info('StatsD server is not available.') if self.stats_file: try: self.file_d = open(self.stats_file, 'a') logger.info("Alignak internal statistics are written in the file %s", self.stats_file) except OSError as exp: # pragma: no cover, should never happen... 
logger.exception("Error when opening the file '%s' : %s", self.stats_file, exp) self.file_d = None return self.statsd_enabled def load_statsd(self): """Create socket connection to statsd host Note that because of the UDP protocol used by StatsD, if no server is listening the socket connection will be accepted anyway :) :return: True if socket got created else False and an exception log is raised """ if not self.statsd_enabled: logger.info('Stats reporting is not enabled, connection is not allowed') return False if self.statsd_enabled and self.carbon: self.my_metrics.append(('.'.join([self.statsd_prefix, self.name, 'connection-test']), (int(time.time()), int(time.time())))) self.carbon.add_data_list(self.my_metrics) self.flush(log=True) else: try: logger.info('Trying to contact StatsD server...') self.statsd_addr = (socket.gethostbyname(self.statsd_host.encode('utf-8')), self.statsd_port) self.statsd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) except (socket.error, socket.gaierror) as exp: logger.warning('Cannot create StatsD socket: %s', exp) return False except Exception as exp: # pylint: disable=broad-except logger.exception('Cannot create StatsD socket (other): %s', exp) return False logger.info('StatsD server contacted') return True def connect(self, name, _type, host='localhost', port=2004, prefix='alignak', enabled=False, broks_enabled=False): """Init instance with real values for a graphite/carbon connection :param name: daemon name :type name: str :param _type: daemon type :type _type: :param host: host to post data :type host: str :param port: port to post data :type port: int :param prefix: prefix to add to metric :type prefix: str :param enabled: bool to enable statsd :type enabled: bool :param broks_enabled: bool to enable broks sending :type broks_enabled: bool :return: None """ self.name = name # This attribute is not used, but I keep ascending compatibility with former interface! 
self._type = _type # local graphite/carbon part self.statsd_host = host try: self.statsd_port = int(port) except ValueError: self.statsd_port = 2004 self.statsd_prefix = prefix self.statsd_enabled = enabled # local broks part self.broks_enabled = broks_enabled logger.debug("Graphite/carbon configuration for %s - %s:%s, prefix: %s, " "enabled: %s, broks: %s, file: %s", self.name, self.statsd_host, self.statsd_port, self.statsd_prefix, self.statsd_enabled, self.broks_enabled, self.stats_file) if self.statsd_enabled and self.statsd_host is not None and self.statsd_host != 'None': logger.info("Sending %s statistics to: %s:%s, prefix: %s", self.name, self.statsd_host, self.statsd_port, self.statsd_prefix) self.carbon = CarbonIface(self.statsd_host, self.statsd_port) logger.info('Statistics for %s will be sent to %s:%s.', self.name, self.statsd_host, self.statsd_port) return self.statsd_enabled def flush(self, log=False): """Send inner stored metrics to the defined Graphite Returns False if the sending failed with a warning log if log parameter is set :return: bool """ if not self.my_metrics: logger.debug("Flushing - no metrics to send") return True now = int(time.time()) if self.last_failure and self.last_failure + self.metrics_flush_pause > now: if not self.log_metrics_flush_pause: date = datetime.datetime.fromtimestamp( self.last_failure).strftime(self.date_fmt) logger.warning("Metrics flush paused on connection error " "(last failed: %s). " "Inner stored metric: %d. Trying to send...", date, self.metrics_count) self.log_metrics_flush_pause = True return True try: logger.debug("Flushing %d metrics to %s:%s for %s.", self.metrics_count, self.statsd_host, self.statsd_port, self.name) if self.carbon.send_data(): self.my_metrics = [] else: logger.warning("Failed sending metrics to Graphite/carbon. " "Inner stored metric: %d", self.metrics_count) if log: logger.warning("Failed sending metrics to Graphite/carbon. 
" "Inner stored metric: %d", self.metrics_count) return False if self.log_metrics_flush_pause: logger.warning("Metrics flush restored. " "Remaining stored metric: %d", self.metrics_count) self.last_failure = 0 self.log_metrics_flush_pause = False except Exception as exp: # pylint: disable=broad-except if not self.log_metrics_flush_pause: logger.warning("Failed sending metrics to Graphite/carbon. " "Inner stored metric: %d", self.metrics_count) else: date = datetime.datetime.fromtimestamp( self.last_failure).strftime(self.date_fmt) logger.warning("Metrics flush paused on connection error " "(last failed: %s). " "Inner stored metric: %d. Trying to send...", date, self.metrics_count) logger.warning("Exception: %s", str(exp)) self.last_failure = now return False return True def send_to_graphite(self, metric, value, timestamp=None): """ Inner store a new metric and flush to Graphite if the flush threshold is reached. If no timestamp is provided, get the current time for the metric timestam. :param metric: metric name in dotted format :type metric: str :param value: :type value: float :param timestamp: metric timestamp :type timestamp: int """ # Manage Graphite part if not self.statsd_enabled or not self.carbon: return if timestamp is None: timestamp = int(time.time()) self.my_metrics.append(('.'.join([self.statsd_prefix, self.name, metric]), (timestamp, value))) if self.metrics_count >= self.metrics_flush_count: self.carbon.add_data_list(self.my_metrics) self.flush() def timer(self, key, value, timestamp=None): """Set a timer value If the inner key does not exist is is created :param key: timer to update :type key: str :param value: timer value (in seconds) :type value: float :param timestamp: metric timestamp :type timestamp: int :return: An alignak_stat brok if broks are enabled else None """ _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0)) count += 1 _sum += value if _min is None or value < _min: _min = value if _max is None or value > _max: _max = 
value self.stats[key] = (_min, _max, count, _sum) # Manage local statsd part if self.statsd_enabled and self.statsd_sock: # beware, we are sending ms here, timer is in seconds packet = '%s.%s.%s:%d|ms' % (self.statsd_prefix, self.name, key, value * 1000) packet = packet.encode('utf-8') try: self.statsd_sock.sendto(packet, self.statsd_addr) except (socket.error, socket.gaierror): pass # cannot send? ok not a huge problem here and we cannot # log because it will be far too verbose :p # Manage Graphite part if self.statsd_enabled and self.carbon: self.send_to_graphite(key, value, timestamp=timestamp) # Manage file part if self.statsd_enabled and self.file_d: if timestamp is None: timestamp = int(time.time()) packet = self.line_fmt if not self.date_fmt: date = "%s" % timestamp else: date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt) packet = packet.replace("#date#", date) packet = packet.replace("#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key)) # beware, we are sending ms here, timer is in seconds packet = packet.replace("#value#", '%d' % (value * 1000)) packet = packet.replace("#uom#", 'ms') # Do not log because it is spamming the log file, but leave this code in place # for it may be restored easily if more tests are necessary... 
;) # logger.debug("Writing data: %s", packet) try: self.file_d.write(packet) except IOError: logger.warning("Could not write to the file: %s", packet) if self.broks_enabled: logger.debug("alignak stat brok: %s = %s", key, value) if timestamp is None: timestamp = int(time.time()) return Brok({'type': 'alignak_stat', 'data': { 'ts': timestamp, 'type': 'timer', 'metric': '%s.%s.%s' % (self.statsd_prefix, self.name, key), 'value': value * 1000, 'uom': 'ms' }}) return None def counter(self, key, value, timestamp=None): """Set a counter value If the inner key does not exist is is created :param key: counter to update :type key: str :param value: counter value :type value: float :return: An alignak_stat brok if broks are enabled else None """ _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0)) count += 1 _sum += value if _min is None or value < _min: _min = value if _max is None or value > _max: _max = value self.stats[key] = (_min, _max, count, _sum) # Manage local statsd part if self.statsd_enabled and self.statsd_sock: # beware, we are sending ms here, timer is in seconds packet = '%s.%s.%s:%d|c' % (self.statsd_prefix, self.name, key, value) packet = packet.encode('utf-8') try: self.statsd_sock.sendto(packet, self.statsd_addr) except (socket.error, socket.gaierror): pass # cannot send? 
ok not a huge problem here and we cannot # log because it will be far too verbose :p # Manage Graphite part if self.statsd_enabled and self.carbon: self.send_to_graphite(key, value, timestamp=timestamp) # Manage file part if self.statsd_enabled and self.file_d: if timestamp is None: timestamp = int(time.time()) packet = self.line_fmt if not self.date_fmt: date = "%s" % timestamp else: date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt) packet = packet.replace("#date#", date) packet = packet.replace("#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key)) packet = packet.replace("#value#", '%d' % value) packet = packet.replace("#uom#", 'c') try: self.file_d.write(packet) except IOError: logger.warning("Could not write to the file: %s", packet) if self.broks_enabled: logger.debug("alignak stat brok: %s = %s", key, value) if timestamp is None: timestamp = int(time.time()) return Brok({'type': 'alignak_stat', 'data': { 'ts': timestamp, 'type': 'counter', 'metric': '%s.%s.%s' % (self.statsd_prefix, self.name, key), 'value': value, 'uom': 'c' }}) return None def gauge(self, key, value, timestamp=None): """Set a gauge value If the inner key does not exist is is created :param key: gauge to update :type key: str :param value: counter value :type value: float :return: An alignak_stat brok if broks are enabled else None """ _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0)) count += 1 _sum += value if _min is None or value < _min: _min = value if _max is None or value > _max: _max = value self.stats[key] = (_min, _max, count, _sum) # Manage local statsd part if self.statsd_enabled and self.statsd_sock: # beware, we are sending ms here, timer is in seconds packet = '%s.%s.%s:%d|g' % (self.statsd_prefix, self.name, key, value) packet = packet.encode('utf-8') try: self.statsd_sock.sendto(packet, self.statsd_addr) except (socket.error, socket.gaierror): pass # cannot send? 
ok not a huge problem here and we cannot # log because it will be far too verbose :p # Manage file part if self.statsd_enabled and self.file_d: if timestamp is None: timestamp = int(time.time()) packet = self.line_fmt if not self.date_fmt: date = "%s" % timestamp else: date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt) packet = packet.replace("#date#", date) packet = packet.replace("#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key)) packet = packet.replace("#value#", '%d' % value) packet = packet.replace("#uom#", 'g') # Do not log because it is spamming the log file, but leave this code in place # for it may be restored easily if more tests are necessary... ;) # logger.debug("Writing data: %s", packet) try: self.file_d.write(packet) except IOError: logger.warning("Could not write to the file: %s", packet) # Manage Graphite part if self.statsd_enabled and self.carbon: self.send_to_graphite(key, value, timestamp=timestamp) if self.broks_enabled: logger.debug("alignak stat brok: %s = %s", key, value) if timestamp is None: timestamp = int(time.time()) return Brok({'type': 'alignak_stat', 'data': { 'ts': timestamp, 'type': 'gauge', 'metric': '%s.%s.%s' % (self.statsd_prefix, self.name, key), 'value': value, 'uom': 'g' }}) return None
class Stats(object): '''Stats class to export data into a statsd (or carbon/Graphite) format This class allows to send metrics to a StatsD server using UDP datagrams. Same behavior as:: echo "foo:1|c" | nc -u -w0 127.0.0.1 8125 With the Graphite option, this class stores the metrics in an inner list and flushes the metrics to a Graphite instance when the flush method is called. ''' def __init__(self): pass @property def metrics_count(self): ''' Number of internal stored metrics :return: ''' pass def __repr__(self): pass def register(self, name, _type, statsd_host='localhost', statsd_port=8125, statsd_prefix='alignak', statsd_enabled=False, broks_enabled=False): '''Init instance with real values :param name: daemon name :type name: str :param _type: daemon type :type _type: :param statsd_host: host to post data :type statsd_host: str :param statsd_port: port to post data :type statsd_port: int :param statsd_prefix: prefix to add to metric :type statsd_prefix: str :param statsd_enabled: bool to enable statsd :type statsd_enabled: bool :param broks_enabled: bool to enable broks sending :type broks_enabled: bool :return: None ''' pass def load_statsd(self): '''Create socket connection to statsd host Note that because of the UDP protocol used by StatsD, if no server is listening the socket connection will be accepted anyway :) :return: True if socket got created else False and an exception log is raised ''' pass def connect(self, name, _type, host='localhost', port=2004, prefix='alignak', enabled=False, broks_enabled=False): '''Init instance with real values for a graphite/carbon connection :param name: daemon name :type name: str :param _type: daemon type :type _type: :param host: host to post data :type host: str :param port: port to post data :type port: int :param prefix: prefix to add to metric :type prefix: str :param enabled: bool to enable statsd :type enabled: bool :param broks_enabled: bool to enable broks sending :type broks_enabled: bool :return: None ''' 
pass def flush(self, log=False): '''Send inner stored metrics to the defined Graphite Returns False if the sending failed with a warning log if log parameter is set :return: bool ''' pass def send_to_graphite(self, metric, value, timestamp=None): ''' Inner store a new metric and flush to Graphite if the flush threshold is reached. If no timestamp is provided, get the current time for the metric timestam. :param metric: metric name in dotted format :type metric: str :param value: :type value: float :param timestamp: metric timestamp :type timestamp: int ''' pass def timer(self, key, value, timestamp=None): '''Set a timer value If the inner key does not exist is is created :param key: timer to update :type key: str :param value: timer value (in seconds) :type value: float :param timestamp: metric timestamp :type timestamp: int :return: An alignak_stat brok if broks are enabled else None ''' pass def counter(self, key, value, timestamp=None): '''Set a counter value If the inner key does not exist is is created :param key: counter to update :type key: str :param value: counter value :type value: float :return: An alignak_stat brok if broks are enabled else None ''' pass def gauge(self, key, value, timestamp=None): '''Set a gauge value If the inner key does not exist is is created :param key: gauge to update :type key: str :param value: counter value :type value: float :return: An alignak_stat brok if broks are enabled else None ''' pass
13
10
45
5
28
13
6
0.48
1
9
2
0
11
20
11
11
515
74
310
50
295
148
250
44
238
12
1
3
68
3,931
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/property.py
alignak.property.PythonizeError
class PythonizeError(Exception): """Simple Exception raise during pythonize call """ pass
class PythonizeError(Exception): '''Simple Exception raise during pythonize call ''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
10
5
1
2
1
1
2
2
1
1
0
3
0
0
3,932
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/scheduler.py
alignak.scheduler.Scheduler
class Scheduler(object): # pylint: disable=too-many-instance-attributes """Scheduler class. Mostly handle scheduling items (host service) to schedule checks raise alerts, manage downtimes, etc.""" def __init__(self, scheduler_daemon): """Receives the daemon this Scheduler is attached to :param scheduler_daemon: schedulerdaemon :type scheduler_daemon: alignak.daemons.schedulerdaemon.Alignak """ self.my_daemon = scheduler_daemon # The scheduling is on/off, default is False self.must_schedule = False # The actions results returned by satelittes or fetched from # passive satellites are stored in this queue self.waiting_results = queue.Queue() # Every N loop turns (usually seconds...) we call functions like consume, del zombies # etc. All of these functions are in recurrent_works with the tick count to run them. # So it must be an integer > 0 # The order is important, so make key an integer index. # TODO: at load, change value by configuration one (like reaper time, etc) self.recurrent_works = { 0: ('update_downtimes_and_comments', self.update_downtimes_and_comments, 1), 1: ('schedule', self.schedule, 1), 2: ('check_freshness', self.check_freshness, 10), 3: ('consume_results', self.consume_results, 1), # now get the news actions (checks, notif) raised 4: ('get_new_actions', self.get_new_actions, 1), 5: ('scatter_master_notifications', self.scatter_master_notifications, 1), 6: ('get_new_broks', self.get_new_broks, 1), # and broks 7: ('delete_zombie_checks', self.delete_zombie_checks, 1), 8: ('delete_zombie_actions', self.delete_zombie_actions, 1), 9: ('clean_caches', self.clean_caches, 1), 10: ('update_retention', self.update_retention, 3600), 11: ('check_orphaned', self.check_orphaned, 60), 12: ('update_program_status', self.update_program_status, 10), 13: ('check_for_system_time_change', self.my_daemon.check_for_system_time_change, 1), 14: ('manage_internal_checks', self.manage_internal_checks, 1), 15: ('clean_queues', self.clean_queues, 1), 16: 
('update_business_values', self.update_business_values, 60), 17: ('reset_topology_change_flag', self.reset_topology_change_flag, 1), 18: ('check_for_expire_acknowledge', self.check_for_expire_acknowledge, 1), 19: ('send_broks_to_modules', self.send_broks_to_modules, 1), 20: ('get_objects_from_from_queues', self.get_objects_from_from_queues, 1), 21: ('get_latency_average_percentile', self.get_latency_average_percentile, 10), } # Statistics part # --- # Created items self.nb_checks = 0 self.nb_internal_checks = 0 self.nb_broks = 0 self.nb_notifications = 0 self.nb_event_handlers = 0 self.nb_external_commands = 0 # Launched checks - send to execution to poller/reactionner self.nb_checks_launched = 0 self.nb_actions_launched = 0 # Checks results received self.nb_checks_results = 0 self.nb_checks_results_timeout = 0 self.nb_checks_results_active = 0 self.nb_checks_results_passive = 0 self.nb_actions_results = 0 self.nb_actions_results_timeout = 0 # Dropped elements self.nb_checks_dropped = 0 self.nb_broks_dropped = 0 self.nb_actions_dropped = 0 self.stats = { 'latency': { 'avg': 0.0, 'min': 0.0, 'max': 0.0 } } # Temporary set. 
Will be updated with the configuration received from our Arbiter self.instance_id = 'uninstantiated' self.instance_name = self.my_daemon.name self.alignak_name = None # And a dummy push flavor self.push_flavor = 0 # Our queues self.checks = {} self.actions = {} # self.program_start = int(time.time()) self.program_start = self.my_daemon.program_start self.pushed_conf = None # Our external commands manager self.external_commands_manager = None # This scheduler has raised the initial broks self.raised_initial_broks = False self.need_dump_environment = False self.need_objects_dump = False @property def name(self): """Get the scheduler name Indeed, we return our suffixed daemon name :return: :rtype: """ return "%s scheduler" % self.my_daemon.name def reset(self): # pylint: disable=not-context-manager """Reset scheduler:: * Remove waiting results * Clear checks and actions lists :return: None """ logger.info("Scheduling loop reset") with self.waiting_results.mutex: self.waiting_results.queue.clear() self.checks.clear() self.actions.clear() def all_my_hosts_and_services(self): """Create an iterator for all my known hosts and services :return: None """ for what in (self.hosts, self.services): for item in what: yield item def load_conf(self, instance_id, instance_name, conf): """Load configuration received from Arbiter and pushed by our Scheduler daemon :param instance_name: scheduler instance name :type instance_name: str :param instance_id: scheduler instance id :type instance_id: str :param conf: configuration to load :type conf: alignak.objects.config.Config :return: None """ self.pushed_conf = conf logger.info("loading my configuration (%s / %s):", instance_id, self.pushed_conf.instance_id) logger.debug("Properties:") for key in sorted(self.pushed_conf.properties): logger.debug("- %s: %s", key, getattr(self.pushed_conf, key, None)) logger.debug("Macros:") for key in sorted(self.pushed_conf.macros): logger.debug("- %s: %s", key, getattr(self.pushed_conf.macros, key, 
[])) logger.debug("Objects types:") for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()): if strclss in ['arbiters', 'schedulers', 'brokers', 'pollers', 'reactionners', 'receivers']: continue if getattr(self.pushed_conf, strclss, None) is None: logger.debug("- no %s", strclss) continue lst_objects = getattr(self.pushed_conf, strclss, []) setattr(self, strclss, lst_objects) # Internal statistics logger.debug("- %d %s, %d templates", len(getattr(lst_objects, 'items')), strclss, len(getattr(lst_objects, 'templates'))) statsmgr.gauge('configuration.%s' % strclss, len(getattr(lst_objects, 'items'))) statsmgr.gauge('configuration.templates_%s' % strclss, len(getattr(lst_objects, 'templates'))) # We need reversed list for searching in the retention file read # todo: check what it is about... self.services.optimize_service_search(self.hosts) # Just deprecated # # Compile the triggers # if getattr(self, 'triggers', None): # logger.info("compiling the triggers...") # self.triggers.compile() # self.triggers.load_objects(self) # else: # logger.info("No triggers") # From the Arbiter configuration. Used for satellites to differentiate the schedulers self.alignak_name = self.pushed_conf.alignak_name self.instance_id = instance_id self.instance_name = instance_name self.push_flavor = getattr(self.pushed_conf, 'push_flavor', 'None') logger.info("Set my scheduler instance: %s - %s - %s", self.instance_id, self.instance_name, self.push_flavor) # Tag our monitored hosts/services with our instance_id for item in self.all_my_hosts_and_services(): item.instance_id = self.instance_id def update_recurrent_works_tick(self, conf): """Modify the tick value for the scheduler recurrent work A tick is an amount of loop of the scheduler before executing the recurrent work The provided configuration may contain some tick-function_name keys that contain a tick value to be updated. Those parameters are defined in the alignak environment file. 
Indeed this function is called with the Scheduler daemon object. Note that the ``conf`` parameter may also be a dictionary. :param conf: the daemon link configuration to search in :type conf: alignak.daemons.schedulerdaemon.Alignak :return: None """ for key in self.recurrent_works: (name, fun, _) = self.recurrent_works[key] if isinstance(conf, dict): new_tick = conf.get('tick_%s' % name, None) else: new_tick = getattr(conf, 'tick_%s' % name, None) if new_tick is not None: logger.debug("Requesting to change the default tick to %d for the action %s", int(new_tick), name) else: continue # Update the default scheduler tick for this function try: new_tick = int(new_tick) logger.info("Changing the default tick to %d for the action %s", new_tick, name) self.recurrent_works[key] = (name, fun, new_tick) except ValueError: logger.warning("Changing the default tick for '%s' to '%s' failed!", new_tick, name) def start_scheduling(self): """Set must_schedule attribute to True - enable the scheduling loop :return: None """ logger.info("Enabling the scheduling loop...") self.must_schedule = True def stop_scheduling(self): """Set must_schedule attribute to False - disable the scheduling loop :return: None """ logger.info("Disabling the scheduling loop...") self.must_schedule = False def dump_objects(self): """Dump scheduler objects into a dump (temp) file :return: None """ path = os.path.join(tempfile.gettempdir(), 'dump-obj-scheduler-%s-%d.json' % (self.name, int(time.time()))) logger.info('Dumping scheduler objects to: %s', path) try: fd = open(path, 'wb') output = 'type:uuid:status:t_to_go:poller_tag:worker:command\n' fd.write(output.encode('utf-8')) for check in list(self.checks.values()): output = 'check:%s:%s:%s:%s:%s:%s\n' \ % (check.uuid, check.status, check.t_to_go, check.poller_tag, check.command, check.my_worker) fd.write(output.encode('utf-8')) logger.info('- dumped checks') for action in list(self.actions.values()): output = '%s: %s:%s:%s:%s:%s:%s\n'\ % 
(action.__class__.my_type, action.uuid, action.status, action.t_to_go, action.reactionner_tag, action.command, action.my_worker) fd.write(output.encode('utf-8')) logger.info('- dumped actions') broks = [] for broker in list(self.my_daemon.brokers.values()): for brok in broker.broks: broks.append(brok) for brok in broks: output = 'BROK: %s:%s\n' % (brok.uuid, brok.type) fd.write(output.encode('utf-8')) logger.info('- dumped broks') fd.close() logger.info('Dumped') except OSError as exp: # pragma: no cover, should never happen... logger.critical("Error when writing the objects dump file %s : %s", path, str(exp)) def dump_config(self): """Dump scheduler configuration into a temporary file The dumped content is JSON formatted :return: None """ path = os.path.join(tempfile.gettempdir(), 'dump-cfg-scheduler-%s-%d.json' % (self.name, int(time.time()))) try: self.pushed_conf.dump(path) except (OSError, IndexError) as exp: # pragma: no cover, should never happen... logger.critical("Error when writing the configuration dump file %s: %s", path, str(exp)) def run_external_commands(self, cmds): """Run external commands Arbiter/Receiver sent :param cmds: commands to run :type cmds: list :return: None """ if not self.external_commands_manager: return try: _t0 = time.time() logger.debug("Scheduler '%s' got %d commands", self.name, len(cmds)) for command in cmds: self.external_commands_manager.resolve_command(ExternalCommand(command)) statsmgr.counter('external-commands.got.count', len(cmds)) statsmgr.timer('external-commands.got.time', time.time() - _t0) except Exception as exp: # pylint: disable=broad-except logger.warning("External command parsing error: %s", exp) logger.warning("Exception: %s / %s", str(exp), traceback.print_exc()) for command in cmds: try: command = command.decode('utf8', 'ignore') except UnicodeEncodeError: pass except AttributeError: pass logger.warning("Command: %s", command) def add_brok(self, brok, broker_uuid=None): """Add a brok into brokers list It can 
be for a specific one, all brokers or none (startup) :param brok: brok to add :type brok: alignak.brok.Brok :param broker_uuid: broker uuid for the brok :type broker_uuid: str :return: None """ # We tag the brok with our instance_id brok.instance_id = self.instance_id if brok.type == 'monitoring_log': # The brok is a monitoring event with self.my_daemon.events_lock: self.my_daemon.events.append(brok) statsmgr.counter('events', 1) # return if broker_uuid: if broker_uuid not in self.my_daemon.brokers: logger.info("Unknown broker: %s / %s!", broker_uuid, self.my_daemon.brokers) return broker_link = self.my_daemon.brokers[broker_uuid] logger.debug("Adding a brok %s for: %s", brok.type, broker_uuid) # it's just for one broker self.my_daemon.brokers[broker_link.uuid].broks.append(brok) self.nb_broks += 1 else: logger.debug("Adding a brok %s to all brokers", brok.type) # add brok to all brokers for broker_link_uuid in self.my_daemon.brokers: logger.debug("- adding to %s", self.my_daemon.brokers[broker_link_uuid]) self.my_daemon.brokers[broker_link_uuid].broks.append(brok) self.nb_broks += 1 def add_notification(self, notification): """Add a notification into actions list :param notification: notification to add :type notification: alignak.notification.Notification :return: None """ if notification.uuid in self.actions: logger.warning("Already existing notification: %s", notification) return logger.debug("Adding a notification: %s", notification) self.actions[notification.uuid] = notification self.nb_notifications += 1 # A notification which is not a master one raises a brok if notification.contact is not None: self.add(notification.get_initial_status_brok()) def add_check(self, check): """Add a check into the scheduler checks list :param check: check to add :type check: alignak.check.Check :return: None """ if check is None: return if check.uuid in self.checks: logger.debug("Already existing check: %s", check) return logger.debug("Adding a check: %s", check) # Add a new 
check to the scheduler checks list self.checks[check.uuid] = check self.nb_checks += 1 # Raise a brok to inform about a next check is to come ... # but only for items that are actively checked item = self.find_item_by_id(check.ref) if item.active_checks_enabled: self.add(item.get_next_schedule_brok()) def add_event_handler(self, action): """Add a event handler into actions list :param action: event handler to add :type action: alignak.eventhandler.EventHandler :return: None """ if action.uuid in self.actions: logger.info("Already existing event handler: %s", action) return self.actions[action.uuid] = action self.nb_event_handlers += 1 def add_external_command(self, ext_cmd): """Resolve external command :param ext_cmd: extermal command to run :type ext_cmd: alignak.external_command.ExternalCommand :return: None """ self.external_commands_manager.resolve_command(ext_cmd) self.nb_external_commands += 1 def add(self, elt): """Generic function to add objects into the scheduler daemon internal lists:: Brok -> self.broks Check -> self.checks Notification -> self.actions EventHandler -> self.actions For an ExternalCommand, tries to resolve the command :param elt: element to add :type elt: :return: None """ if elt is None: return logger.debug("Adding: %s / %s", elt.my_type, elt.__dict__) fun = self.__add_actions.get(elt.__class__, None) if fun: fun(self, elt) else: logger.warning("self.add(): Unmanaged object class: %s (object=%r)", elt.__class__, elt) __add_actions = { Check: add_check, Brok: add_brok, Notification: add_notification, EventHandler: add_event_handler, ExternalCommand: add_external_command, } def hook_point(self, hook_name): """Generic function to call modules methods if such method is avalaible :param hook_name: function name to call :type hook_name: str :return:None """ self.my_daemon.hook_point(hook_name=hook_name, handle=self) def clean_queues(self): # pylint: disable=too-many-locals """Reduces internal list size to max allowed * checks and broks : 5 * 
length of hosts + services * actions : 5 * length of hosts + services + contacts :return: None """ # If we set the interval at 0, we bail out if getattr(self.pushed_conf, 'tick_clean_queues', 0) == 0: logger.debug("No queues cleaning...") return max_checks = MULTIPLIER_MAX_CHECKS * (len(self.hosts) + len(self.services)) max_broks = MULTIPLIER_MAX_BROKS * (len(self.hosts) + len(self.services)) max_actions = MULTIPLIER_MAX_ACTIONS * len(self.contacts) * (len(self.hosts) + len(self.services)) # For checks, it's not very simple: # For checks, they may be referred to their host/service # We do not just del them in the check list, but also in their service/host # We want id of lower than max_id - 2*max_checks self.nb_checks_dropped = 0 if max_checks and len(self.checks) > max_checks: # keys does not ensure sorted keys. Max is slow but we have no other way. to_del_checks = [c for c in list(self.checks.values())] to_del_checks.sort(key=lambda x: x.creation_time) to_del_checks = to_del_checks[:-max_checks] self.nb_checks_dropped = len(to_del_checks) if to_del_checks: logger.warning("I have to drop some checks (%d)..., sorry :(", self.nb_checks_dropped) for chk in to_del_checks: c_id = chk.uuid items = getattr(self, chk.ref_type + 's') elt = items[chk.ref] # First remove the link in host/service elt.remove_in_progress_check(chk) # Then in dependent checks (I depend on, or check # depend on me) for dependent_checks in chk.depend_on_me: dependent_checks.depend_on.remove(chk.uuid) for c_temp in chk.depend_on: c_temp.depend_on_me.remove(chk) del self.checks[c_id] # Final Bye bye ... 
# For broks and actions, it's more simple # or broks, manage global but also all brokers self.nb_broks_dropped = 0 for broker_link in list(self.my_daemon.brokers.values()): if max_broks and len(broker_link.broks) > max_broks: logger.warning("I have to drop some broks (%d > %d) for the broker %s " "..., sorry :(", len(broker_link.broks), max_broks, broker_link) kept_broks = sorted(broker_link.broks, key=lambda x: x.creation_time) # Delete the oldest broks to keep the max_broks most recent... # todo: is it a good choice ! broker_link.broks = kept_broks[0:max_broks] self.nb_actions_dropped = 0 if max_actions and len(self.actions) > max_actions: logger.warning("I have to del some actions (currently: %d, max: %d)..., sorry :(", len(self.actions), max_actions) to_del_actions = [c for c in list(self.actions.values())] to_del_actions.sort(key=lambda x: x.creation_time) to_del_actions = to_del_actions[:-max_actions] self.nb_actions_dropped = len(to_del_actions) for act in to_del_actions: if act.is_a == 'notification': self.find_item_by_id(act.ref).remove_in_progress_notification(act) del self.actions[act.uuid] def clean_caches(self): """Clean timperiods caches :return: None """ for timeperiod in self.timeperiods: timeperiod.clean_cache() def get_and_register_status_brok(self, item): """Get a update status brok for item and add it :param item: item to get brok from :type item: alignak.objects.item.Item :return: None """ self.add(item.get_update_status_brok()) def get_and_register_check_result_brok(self, item): """Get a check result brok for item and add it :param item: item to get brok from :type item: alignak.objects.schedulingitem.SchedulingItem :return: None """ self.add(item.get_check_result_brok()) def check_for_expire_acknowledge(self): """Iter over host and service and check if any acknowledgement has expired :return: None """ for elt in self.all_my_hosts_and_services(): elt.check_for_expire_acknowledge() def update_business_values(self): """Iter over host and service 
and update business_impact :return: None """ for elt in self.all_my_hosts_and_services(): if not elt.is_problem: was = elt.business_impact elt.update_business_impact_value(self.hosts, self.services, self.timeperiods, self.businessimpactmodulations) new = elt.business_impact # Ok, the business_impact change, we can update the broks if new != was: self.get_and_register_status_brok(elt) # When all impacts and classic elements are updated, # we can update problems (their value depend on impacts, so # they must be done after) for elt in self.all_my_hosts_and_services(): # We first update impacts and classic elements if elt.is_problem: was = elt.business_impact elt.update_business_impact_value(self.hosts, self.services, self.timeperiods, self.businessimpactmodulations) new = elt.business_impact # Maybe one of the impacts change it's business_impact to a high value # and so ask for the problem to raise too if new != was: self.get_and_register_status_brok(elt) def scatter_master_notifications(self): """Generate children notifications from a master notification Also update notification number Master notification are raised when a notification must be sent out. They are not launched by reactionners (only children are) but they are used to build the children notifications. From one master notification, several children notifications may be built, indeed one per each contact... :return: None """ now = time.time() # We only want the master scheduled notifications that are immediately launchable notifications = [a for a in self.actions.values() if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED and not a.contact and a.is_launchable(now)] if notifications: logger.debug("Scatter master notification: %d notifications", len(notifications)) for notification in notifications: logger.debug("Scheduler got a master notification: %s", notification) # This is a "master" notification created by an host/service. 
# We use it to create children notifications (for the contacts and # notification_commands) which are executed in the reactionner. item = self.find_item_by_id(notification.ref) children = [] notification_period = None if getattr(item, 'notification_period', None) is not None: notification_period = self.timeperiods[item.notification_period] if not item.is_blocking_notifications(notification_period, self.hosts, self.services, notification.type, now): # If it is possible to send notifications # of this type at the current time, then create # a single notification for each contact of this item. children = item.scatter_notification( notification, self.contacts, self.notificationways, self.timeperiods, self.macromodulations, self.escalations, self.find_item_by_id(getattr(item, "host", None)) ) for notif in children: logger.debug(" - child notification: %s", notif) notif.status = ACT_STATUS_SCHEDULED # Add the notification to the scheduler objects self.add(notif) # If we have notification_interval then schedule # the next notification (problems only) if notification.type == u'PROBLEM': # Update the ref notif number after raise the one of the notification if children: # notif_nb of the master notification # was already current_notification_number+1. # If notifications were sent, # then host/service-counter will also be incremented item.current_notification_number = notification.notif_nb if item.notification_interval and notification.t_to_go is not None: # We must continue to send notifications. # Just leave it in the actions list and set it to "scheduled" # and it will be found again later # Ask the service/host to compute the next notif time. 
It can be just # a.t_to_go + item.notification_interval*item.__class__.interval_length # or maybe before because we have an # escalation that need to raise up before notification.t_to_go = item.get_next_notification_time(notification, self.escalations, self.timeperiods) notification.notif_nb = item.current_notification_number + 1 logger.debug("Repeat master notification: %s", notification) else: # Wipe out this master notification. It is a master one item.remove_in_progress_notification(notification) logger.debug("Remove master notification (no repeat): %s", notification) else: # Wipe out this master notification. logger.debug("Remove master notification (no more a problem): %s", notification) # We don't repeat recover/downtime/flap/etc... item.remove_in_progress_notification(notification) def get_to_run_checks(self, do_checks=False, do_actions=False, poller_tags=None, reactionner_tags=None, worker_name='none', module_types=None): # pylint: disable=too-many-branches """Get actions/checks for reactionner/poller Called by the poller to get checks (do_checks=True) and by the reactionner (do_actions=True) to get actions :param do_checks: do we get checks ? :type do_checks: bool :param do_actions: do we get actions ? 
:type do_actions: bool :param poller_tags: poller tags to filter :type poller_tags: list :param reactionner_tags: reactionner tags to filter :type reactionner_tags: list :param worker_name: worker name to fill check/action (to remember it) :type worker_name: str :param module_types: module type to filter :type module_types: list :return: Check/Action list with poller/reactionner tags matching and module type matching :rtype: list """ res = [] now = time.time() if poller_tags is None: poller_tags = ['None'] if reactionner_tags is None: reactionner_tags = ['None'] if module_types is None: module_types = ['fork'] if not isinstance(module_types, list): module_types = [module_types] # If a poller wants its checks if do_checks: if self.checks: logger.debug("I have %d prepared checks", len(self.checks)) for check in list(self.checks.values()): logger.debug("Check: %s (%s / %s)", check.uuid, check.poller_tag, check.module_type) if check.internal: # Do not care about Alignak internally executed checks continue # If the command is untagged, and the poller too, or if both are tagged # with same name, go for it # if do_check, call for poller, and so poller_tags by default is ['None'] # by default poller_tag is 'None' and poller_tags is ['None'] # and same for module_type, the default is the 'fork' type if check.poller_tag not in poller_tags: logger.debug(" -> poller tag do not match") continue if check.module_type not in module_types: logger.debug(" -> module type do not match") continue logger.debug(" -> : %s %s (%s)", 'worker' if not check.internal else 'internal', check.status, 'now' if check.is_launchable(now) else 'not yet') if check._is_orphan and check.status == ACT_STATUS_SCHEDULED \ and os.getenv('ALIGNAK_LOG_CHECKS', None): logger.info("--ALC-- orphan check: %s -> : %s %s (%s)", check, 'worker' if not check.internal else 'internal', check.status, 'now' if check.is_launchable(now) else 'not yet') # must be ok to launch, and not an internal one (business rules based) 
if check.status == ACT_STATUS_SCHEDULED and check.is_launchable(now): logger.debug("Check to run: %s", check) check.status = ACT_STATUS_POLLED check.my_worker = worker_name res.append(check) # Stats self.nb_checks_launched += 1 if 'ALIGNAK_LOG_ACTIONS' in os.environ: if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING': logger.warning("Check to run: %s", check) else: logger.info("Check to run: %s", check) if res: logger.debug("-> %d checks to start now", len(res)) else: logger.debug("-> no checks to start now") # If a reactionner wants its actions if do_actions: if self.actions: logger.debug("I have %d prepared actions", len(self.actions)) for action in list(self.actions.values()): logger.debug("Action: %s (%s / %s)", action.uuid, action.reactionner_tag, action.module_type) if action.internal: # Do not care about Alignak internally executed checks continue is_master = (action.is_a == 'notification' and not action.contact) if is_master: continue # if do_action, call the reactionner, # and so reactionner_tags by default is ['None'] # by default reactionner_tag is 'None' and reactionner_tags is ['None'] too # So if not the good one, loop for next :) if action.reactionner_tag not in reactionner_tags: logger.debug(" -> reactionner tag do not match") continue # same for module_type if action.module_type not in module_types: logger.debug(" -> module type do not match") continue # And now look if we can launch or not :) logger.debug(" -> : worker %s (%s)", action.status, 'now' if action.is_launchable(now) else 'not yet') if action._is_orphan and action.status == ACT_STATUS_SCHEDULED and \ os.getenv('ALIGNAK_LOG_CHECKS', None): logger.info("--ALC-- orphan action: %s", action) if action.status == ACT_STATUS_SCHEDULED and action.is_launchable(now): # This is for child notifications and eventhandlers action.status = ACT_STATUS_POLLED action.my_worker = worker_name res.append(action) # Stats self.nb_actions_launched += 1 if 'ALIGNAK_LOG_ACTIONS' in os.environ: if 
os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING': logger.warning("Action to run: %s", action) else: logger.info("Action to run: %s", action) if res: logger.debug("-> %d actions to start now", len(res)) else: logger.debug("-> no actions to start now") return res def manage_results(self, action): # pylint: disable=too-many-branches,too-many-statements """Get result from pollers/reactionners (actives ones) :param action: check / action / event handler to handle :type action: :return: None """ logger.debug('manage_results: %s ', action) if action.is_a == 'notification': try: _ = self.actions[action.uuid] except KeyError as exp: # pragma: no cover, simple protection # Cannot find notification - drop it logger.warning('manage_results:: get unknown notification : %s ', str(exp)) for uuid in self.actions: logger.debug('manage_results:: known action: %s ', self.actions[uuid]) return # We will only see child notifications here try: timeout = False execution_time = 0 if action.status == ACT_STATUS_TIMEOUT: # Unfortunately the remove_in_progress_notification # sets the status to zombie, so we need to save it here. 
                    timeout = True
                    execution_time = action.execution_time

                # Add protection for strange charset
                try:
                    action.output = action.output.decode('utf8', 'ignore')
                except UnicodeDecodeError:
                    pass
                except AttributeError:
                    # Python 3 will raise an exception (str has no decode method)
                    pass

                # Copy the execution result into the stored notification action
                self.actions[action.uuid].get_return_from(action)
                item = self.find_item_by_id(self.actions[action.uuid].ref)
                item.remove_in_progress_notification(action)
                # The notification is finished - mark it as a zombie so it gets cleaned up
                self.actions[action.uuid].status = ACT_STATUS_ZOMBIE
                item.last_notification = int(action.check_time)

                # And we ask the item to update its state
                self.get_and_register_status_brok(item)

                # If we' ve got a problem with the notification, raise a Warning log
                if timeout:
                    contact = self.find_item_by_id(self.actions[action.uuid].contact)
                    item = self.find_item_by_id(self.actions[action.uuid].ref)

                    self.nb_actions_results_timeout += 1

                    logger.warning("Contact %s %s notification command '%s ' "
                                   "timed out after %.2f seconds",
                                   contact.contact_name,
                                   item.my_type,
                                   self.actions[action.uuid].command,
                                   execution_time)
                else:
                    self.nb_actions_results += 1

                    if action.exit_status != 0:
                        logger.warning("The notification command '%s' raised an error "
                                       "(exit code=%d): '%s'",
                                       action.command, action.exit_status, action.output)

            except (ValueError, AttributeError) as exp:  # pragma: no cover, simple protection
                # bad object, drop it
                logger.warning('manage_results:: got bad notification : %s ', str(exp))

        elif action.is_a == 'check':
            # A check result
            try:
                check = self.checks[action.uuid]
            except KeyError as exp:  # pragma: no cover, simple protection
                # Cannot find check - drop it
                logger.warning('manage_results:: get unknown check: %s ', action)
                for uuid in self.checks:
                    logger.debug('manage_results:: known check: %s ', self.checks[uuid])
                return

            try:
                if action.status == ACT_STATUS_TIMEOUT:
                    # Patch the check result to report the timeout
                    ref = self.find_item_by_id(check.ref)
                    action.long_output = action.output
                    action.output = "(%s %s check timed out)" % (ref.my_type, ref.get_full_name())
                    action.exit_status = self.pushed_conf.timeout_exit_status

                    self.nb_checks_results_timeout += 1

                    logger.warning("Timeout raised for '%s' (check command for the %s '%s'), "
                                   "check status code: %d, execution time: %d seconds",
                                   action.command, ref.my_type, ref.get_full_name(),
                                   action.exit_status, int(action.execution_time))
                else:
                    self.nb_checks_results += 1
                    if action.passive_check:
                        self.nb_checks_results_passive += 1
                    else:
                        self.nb_checks_results_active += 1

                # Copy the result into the stored check and queue it for consumption
                check.get_return_from(action)
                check.status = ACT_STATUS_WAIT_CONSUME
                if check._is_orphan and os.getenv('ALIGNAK_LOG_CHECKS', None):
                    logger.info("--ALC-- got a result for an orphan check: %s", check)
            except (ValueError, AttributeError) as exp:  # pragma: no cover, simple protection
                # bad object, drop it
                logger.warning('manage_results:: got bad check: %s ', str(exp))

        elif action.is_a == 'eventhandler':
            # An event handler (or snapshot) result
            try:
                old_action = self.actions[action.uuid]
                old_action.status = ACT_STATUS_ZOMBIE
            except KeyError as exp:  # pragma: no cover, simple protection
                # cannot find old action
                # bad object, drop it
                logger.warning('manage_results:: get bad check: %s ', str(exp))
                return

            try:
                if action.status == ACT_STATUS_TIMEOUT:
                    _type = 'event handler'
                    if action.is_snapshot:
                        _type = 'snapshot'
                    ref = self.find_item_by_id(self.checks[action.uuid].ref)
                    logger.info("%s %s command '%s' timed out after %d seconds",
                                ref.__class__.my_type.capitalize(),  # pylint: disable=E1101
                                _type, self.actions[action.uuid].command,
                                int(action.execution_time))

                    self.nb_actions_results_timeout += 1
                else:
                    self.nb_actions_results += 1

                # If it's a snapshot we should get the output and export it
                if action.is_snapshot:
                    old_action.get_return_from(action)
                    s_item = self.find_item_by_id(old_action.ref)
                    self.add(s_item.get_snapshot_brok(old_action.output, old_action.exit_status))
            except (ValueError, AttributeError) as exp:  # pragma: no cover, simple protection
                # bad object, drop it
                logger.warning('manage_results:: got bad event handler: %s ', str(exp))

        else:  # pragma: no cover, simple protection, should not happen!
            logger.error("The received result type in unknown! %s", str(action.is_a))

    def push_actions_to_passive_satellites(self):
        """Send actions/checks to passive poller/reactionners

        Passive satellites do not fetch their work themselves; the scheduler
        pushes the ready-to-run checks (pollers) / actions (reactionners) to them.

        :return: None
        """
        # We loop for our passive pollers or reactionners
        for satellites in [self.my_daemon.pollers, self.my_daemon.reactionners]:
            s_type = 'poller'
            if satellites is self.my_daemon.reactionners:
                s_type = 'reactionner'

            for link in [s for s in list(satellites.values()) if s.passive]:
                logger.debug("Try to send actions to the %s '%s'", s_type, link.name)

                # Get actions to execute, filtered on the link tags
                lst = []
                if s_type == 'poller':
                    lst = self.get_to_run_checks(do_checks=True, do_actions=False,
                                                 poller_tags=link.poller_tags,
                                                 worker_name=link.name)
                elif s_type == 'reactionner':
                    lst = self.get_to_run_checks(do_checks=False, do_actions=True,
                                                 reactionner_tags=link.reactionner_tags,
                                                 worker_name=link.name)
                if not lst:
                    logger.debug("Nothing to do...")
                    continue

                logger.debug("Sending %d actions to the %s '%s'", len(lst), s_type, link.name)
                link.push_actions(lst, self.instance_id)

    def get_results_from_passive_satellites(self):
        # pylint: disable=broad-except
        """Get actions/checks results from passive poller/reactionners

        Results are unserialized and pushed to the scheduler waiting_results queue.

        :return: None
        """
        # We loop for our passive pollers or reactionners
        for satellites in [self.my_daemon.pollers, self.my_daemon.reactionners]:
            s_type = 'poller'
            if satellites is self.my_daemon.reactionners:
                s_type = 'reactionner'

            for link in [s for s in list(satellites.values()) if s.passive]:
                logger.debug("Trying to get results from the %s '%s'", s_type, link.name)

                results = link.get_results(self.instance_id)
                if results:
                    logger.debug("Got some results: %d results from %s",
                                 len(results), link.name)
                else:
                    logger.debug("-> no passive results from %s", link.name)
                    continue

                results = unserialize(results, no_json=True)
                if results:
                    logger.debug("Received %d passive results from %s",
                                 len(results), link.name)

                for result in results:
                    logger.debug("-> result: %s", result)
                    # Append to the scheduler result queue
                    self.waiting_results.put(result)

    def
manage_internal_checks(self): """Run internal checks :return: None """ if os.getenv('ALIGNAK_MANAGE_INTERNAL', '1') != '1': return now = time.time() for chk in list(self.checks.values()): if not chk.internal: # Exclude checks that are not internal ones continue # Exclude checks that are not yet ready to launch if not chk.is_launchable(now) or chk.status not in [ACT_STATUS_SCHEDULED]: continue item = self.find_item_by_id(chk.ref) # Only if active checks are enabled if not item or not item.active_checks_enabled: # Ask to remove the check chk.status = ACT_STATUS_ZOMBIE continue logger.debug("Run internal check for %s", item) self.nb_internal_checks += 1 # Execute internal check item.manage_internal_check(self.hosts, self.services, chk, self.hostgroups, self.servicegroups, self.macromodulations, self.timeperiods) # Ask to consume the check result chk.status = ACT_STATUS_WAIT_CONSUME def reset_topology_change_flag(self): """Set topology_change attribute to False in all hosts and services :return: None """ for i in self.hosts: i.topology_change = False for i in self.services: i.topology_change = False def update_retention(self): """Call hook point 'save_retention'. Retention modules will write back retention (to file, db etc) :param forced: is update forced? :type forced: bool :return: None """ # If we set the retention update to 0, we do not want to manage retention # If we are not forced (like at stopping) if self.pushed_conf.retention_update_interval == 0: logger.debug("Should have saved retention but it is not enabled") return _t0 = time.time() self.hook_point('save_retention') statsmgr.timer('hook.retention-save', time.time() - _t0) self.add(make_monitoring_log('INFO', 'RETENTION SAVE: %s' % self.my_daemon.name)) logger.info('Retention data saved: %.2f seconds', time.time() - _t0) def retention_load(self, forced=False): """Call hook point 'load_retention'. Retention modules will read retention (from file, db etc) :param forced: is load forced? 
:type forced: bool :return: None """ # If we set the retention update to 0, we do not want to manage retention # If we are not forced (like at stopping) if self.pushed_conf.retention_update_interval == 0 and not forced: logger.debug("Should have loaded retention but it is not enabled") return _t0 = time.time() self.hook_point('load_retention') statsmgr.timer('hook.retention-load', time.time() - _t0) self.add(make_monitoring_log('INFO', 'RETENTION LOAD: %s' % self.my_daemon.name)) logger.info('Retention data loaded: %.2f seconds', time.time() - _t0) def log_initial_states(self): """Raise hosts and services initial status logs First, raise hosts status and then services. This to allow the events log to be a little sorted. :return: None """ # Raise hosts initial status broks for elt in self.hosts: elt.raise_initial_state() # And then services initial status broks for elt in self.services: elt.raise_initial_state() def get_retention_data(self): # pylint: disable=too-many-branches,too-many-statements # pylint: disable=too-many-locals """Get all hosts and services data to be sent to the retention storage. This function only prepares the data because a module is in charge of making the data survive to the scheduler restart. todo: Alignak scheduler creates two separate dictionaries: hosts and services It would be better to merge the services into the host dictionary! :return: dict containing host and service data :rtype: dict """ retention_data = { 'hosts': {}, 'services': {} } for host in self.hosts: h_dict = {} # Get the hosts properties and running properties properties = host.__class__.properties properties.update(host.__class__.running_properties) for prop, entry in list(properties.items()): if not entry.retention: continue val = getattr(host, prop) # If a preparation function exists... 
prepare_retention = entry.retention_preparation if prepare_retention: val = prepare_retention(host, val) h_dict[prop] = val retention_data['hosts'][host.host_name] = h_dict logger.info('%d hosts sent to retention', len(retention_data['hosts'])) # Same for services for service in self.services: s_dict = {} # Get the services properties and running properties properties = service.__class__.properties properties.update(service.__class__.running_properties) for prop, entry in list(properties.items()): if not entry.retention: continue val = getattr(service, prop) # If a preparation function exists... prepare_retention = entry.retention_preparation if prepare_retention: val = prepare_retention(service, val) s_dict[prop] = val retention_data['services'][(service.host_name, service.service_description)] = s_dict logger.info('%d services sent to retention', len(retention_data['services'])) return retention_data def restore_retention_data(self, data): """Restore retention data Data coming from retention will override data coming from configuration It is kinda confusing when you modify an attribute (external command) and it get saved by retention :param data: data from retention :type data: dict :return: None """ if 'hosts' not in data: logger.warning("Retention data are not correct, no 'hosts' property!") return for host_name in data['hosts']: # We take the dict of our value to load host = self.hosts.find_by_name(host_name) if host is not None: self.restore_retention_data_item(data['hosts'][host_name], host) statsmgr.gauge('retention.hosts', len(data['hosts'])) logger.info('%d hosts restored from retention', len(data['hosts'])) # Same for services for (host_name, service_description) in data['services']: # We take our dict to load service = self.services.find_srv_by_name_and_hostname(host_name, service_description) if service is not None: self.restore_retention_data_item(data['services'][(host_name, service_description)], service) statsmgr.gauge('retention.services', 
len(data['services'])) logger.info('%d services restored from retention', len(data['services'])) def restore_retention_data_item(self, data, item): # pylint: disable=too-many-branches, too-many-locals """ Restore data in item :param data: retention data of the item :type data: dict :param item: host or service item :type item: alignak.objects.host.Host | alignak.objects.service.Service :return: None """ # Manage the properties and running properties properties = item.__class__.properties properties.update(item.__class__.running_properties) for prop, entry in list(properties.items()): if not entry.retention: continue if prop not in data: continue # If a restoration function exists... restore_retention = entry.retention_restoration if restore_retention: setattr(item, prop, restore_retention(item, data[prop])) else: setattr(item, prop, data[prop]) # Now manage all linked objects load from/ previous run for notification_uuid in item.notifications_in_progress: notification = item.notifications_in_progress[notification_uuid] # Update the notification referenced object notification['ref'] = item.uuid my_notification = Notification(params=notification) item.notifications_in_progress[notification_uuid] = my_notification # Add a notification in the scheduler actions self.add(my_notification) # todo: is it useful? We do not save/restore checks in the retention data... item.update_in_checking() # And also add downtimes and comments # Downtimes are in a list.. for downtime_uuid in data['downtimes']: downtime = data['downtimes'][downtime_uuid] # Update the downtime referenced object downtime['ref'] = item.uuid my_downtime = Downtime(params=downtime) if downtime['comment_id']: if downtime['comment_id'] not in data['comments']: downtime['comment_id'] = '' # case comment_id has comment dict instead uuid # todo: This should never happen! Why this code ? 
if 'uuid' in downtime['comment_id']: data['comments'].append(downtime['comment_id']) downtime['comment_id'] = downtime['comment_id']['uuid'] item.add_downtime(my_downtime) # Comments are in a list.. for comment_uuid in data['comments']: comment = data['comments'][comment_uuid] # Update the comment referenced object comment['ref'] = item.uuid item.add_comment(Comment(comment)) if item.acknowledgement is not None: # Update the comment referenced object item.acknowledgement['ref'] = item.uuid item.acknowledgement = Acknowledge(item.acknowledgement) # Relink the notified_contacts as a list of contacts objects # if it was loaded from the retention, it's now a list of contacts # names new_notified_contacts = [] new_notified_contacts_ids = [] for contact_name in item.notified_contacts: contact = self.contacts.find_by_name(contact_name) if contact is None: logger.warning('Restored an unknown contact from the retention: %s', contact_name) continue if contact_name not in new_notified_contacts: new_notified_contacts.append(contact_name) if contact.uuid not in new_notified_contacts_ids: new_notified_contacts_ids.append(contact.uuid) item.notified_contacts = new_notified_contacts item.notified_contacts_ids = new_notified_contacts_ids def fill_initial_broks(self, broker_name): # pylint: disable=too-many-branches """Create initial broks for a specific broker :param broker_name: broker name :type broker_name: str :return: number of created broks """ broker_uuid = None logger.debug("My brokers: %s", self.my_daemon.brokers) for broker_link in list(self.my_daemon.brokers.values()): logger.debug("Searching broker: %s", broker_link) if broker_name == broker_link.name: broker_uuid = broker_link.uuid logger.info("Filling initial broks for %s", broker_name) break else: if self.pushed_conf: # I am yet configured but I do not know this broker ! Something went wrong!!! 
                logger.error("Requested initial broks for an unknown broker: %s", broker_name)
            else:
                logger.info("Requested initial broks for an unknown broker: %s", broker_name)
            return 0

        if self.my_daemon.brokers[broker_uuid].initialized:
            logger.warning("The broker %s still got its initial broks...", broker_name)
            return 0

        initial_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)

        # First the program status
        self.add_brok(self.get_program_status_brok(), broker_uuid)

        # skip_initial_broks may not exist on older pushed configurations
        self.pushed_conf.skip_initial_broks = getattr(self.pushed_conf,
                                                      'skip_initial_broks', False)
        logger.debug("Skipping initial broks? %s", str(self.pushed_conf.skip_initial_broks))
        if not self.pushed_conf.skip_initial_broks:
            # Get initial_status broks for all these types of objects
            # The order is important, service need host...
            for t in [self.timeperiods, self.commands, self.notificationways,
                      self.contacts, self.contactgroups,
                      self.hosts, self.hostgroups, self.hostdependencies,
                      self.services, self.servicegroups, self.servicedependencies,
                      self.escalations]:
                if not t:
                    continue
                for item in t:
                    # Groups also provide their members in the initial status brok
                    members = None
                    if isinstance(item, Hostgroup):
                        members = self.hosts
                    if isinstance(item, Servicegroup):
                        members = self.services
                    if isinstance(item, Contactgroup):
                        members = self.contacts
                    self.add_brok(item.get_initial_status_brok(members), broker_uuid)

            # Get initial_status broks for all these types of templates
            # The order is important, service need host...
            for t in [self.contacts, self.hosts, self.services]:
                if not t:
                    continue
                for item_uuid in t.templates:
                    item = t.templates[item_uuid]
                    self.add_brok(item.get_initial_status_brok(extra=None), broker_uuid)

        # Add a brok to say that we finished all initial_pass
        brok = Brok({'type': 'initial_broks_done', 'data': {'instance_id': self.instance_id}})
        self.add_brok(brok, broker_uuid)

        final_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
        self.my_daemon.brokers[broker_uuid].initialized = True

        # Send the initial broks to our modules
        self.send_broks_to_modules()

        # We now have raised all the initial broks
        self.raised_initial_broks = True

        logger.info("Created %d initial broks for %s",
                    final_broks_count - initial_broks_count, broker_name)
        return final_broks_count - initial_broks_count

    def initial_program_status(self):
        """Create and add a program_status brok

        :return: None
        """
        self.add(self.get_program_status_brok(brok_type='program_status'))

    def update_program_status(self):
        """Create and add a update_program_status brok

        :return: None
        """
        self.add(self.get_program_status_brok(brok_type='update_program_status'))

    def get_program_status_brok(self, brok_type='program_status'):
        """Create a program status brok

        Initially builds the running properties and then, if initial status brok,
        get the properties from the Config class where an entry exist for the brok
        'full_status'

        :param brok_type: 'program_status' or 'update_program_status'
        :type brok_type: str
        :return: Brok with program status data
        :rtype: alignak.brok.Brok
        """
        # Get the running statistics
        data = {
            "is_running": True,
            "instance_id": self.instance_id,
            # "alignak_name": self.alignak_name,
            "instance_name": self.name,
            "last_alive": time.time(),
            "pid": os.getpid(),

            '_running': self.get_scheduler_stats(details=True),
            '_config': {},
            '_macros': {}
        }

        # Get configuration data from the pushed configuration
        cls = self.pushed_conf.__class__
        for prop, entry in list(cls.properties.items()):
            # Is this property intended for broking?
            if FULL_STATUS not in entry.fill_brok:
                continue
            data['_config'][prop] = self.pushed_conf.get_property_value_for_brok(
                prop, cls.properties)
            # data['_config'][prop] = getattr(self.pushed_conf, prop, entry.default)

        # Get the macros from the pushed configuration and try to resolve
        # the macros to provide the result in the status brok
        macro_resolver = MacroResolver()
        macro_resolver.init(self.pushed_conf)
        for macro_name in sorted(self.pushed_conf.macros):
            data['_macros'][macro_name] = \
                macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name,
                                                               [], None, None)

        logger.debug("Program status brok %s data: %s", brok_type, data)
        return Brok({'type': brok_type, 'data': data})

    def consume_results(self):
        # pylint: disable=too-many-branches
        """Handle results waiting in waiting_results list.
        Check ref will call consume result and update their status

        :return: None
        """
        # All results are in self.waiting_results
        # We need to get them first
        queue_size = self.waiting_results.qsize()
        for _ in range(queue_size):
            self.manage_results(self.waiting_results.get())

        # Then we consume them
        for chk in list(self.checks.values()):
            if chk.status == ACT_STATUS_WAIT_CONSUME:
                logger.debug("Consuming: %s", chk)
                item = self.find_item_by_id(chk.ref)

                notification_period = None
                if getattr(item, 'notification_period', None) is not None:
                    notification_period = self.timeperiods[item.notification_period]

                dep_checks = item.consume_result(chk, notification_period, self.hosts,
                                                 self.services, self.timeperiods,
                                                 self.macromodulations, self.checkmodulations,
                                                 self.businessimpactmodulations,
                                                 self.resultmodulations, self.checks,
                                                 self.pushed_conf.log_active_checks
                                                 and not chk.passive_check)

                # # Raise the log only when the check got consumed!
                # # Else the item information are not up-to-date :/
                # if self.pushed_conf.log_active_checks and not chk.passive_check:
                #     item.raise_check_result()
                #
                for check in dep_checks:
                    logger.debug("-> raised a dependency check: %s", check)
                    self.add(check)

        # Loop to resolve dependencies - repeat until a full pass resolves nothing more
        have_resolved_checks = True
        while have_resolved_checks:
            have_resolved_checks = False
            # All 'finished' checks (no more dep) raise checks they depend on
            for chk in list(self.checks.values()):
                if chk.status == ACT_STATUS_WAITING_ME:
                    # NOTE(review): each element is a single check despite the plural name
                    for dependent_checks in chk.depend_on_me:
                        # Ok, now dependent will no more wait
                        dependent_checks.depend_on.remove(chk.uuid)
                        have_resolved_checks = True
                    # REMOVE OLD DEP CHECK -> zombie
                    chk.status = ACT_STATUS_ZOMBIE

            # Now, include dependent checks
            for chk in list(self.checks.values()):
                if chk.status == ACT_STATUS_WAIT_DEPEND and not chk.depend_on:
                    item = self.find_item_by_id(chk.ref)
                    notification_period = None
                    if getattr(item, 'notification_period', None) is not None:
                        notification_period = self.timeperiods[item.notification_period]

                    dep_checks = item.consume_result(chk, notification_period, self.hosts,
                                                     self.services, self.timeperiods,
                                                     self.macromodulations,
                                                     self.checkmodulations,
                                                     self.businessimpactmodulations,
                                                     self.resultmodulations, self.checks,
                                                     self.pushed_conf.log_active_checks
                                                     and not chk.passive_check)
                    for check in dep_checks:
                        self.add(check)

    def delete_zombie_checks(self):
        """Remove checks that have a zombie status (usually timeouts)

        :return: None
        """
        id_to_del = []
        for chk in list(self.checks.values()):
            if chk.status == ACT_STATUS_ZOMBIE:
                id_to_del.append(chk.uuid)
        # A little pat on the back and away you go, thank you...
        # *pat pat* GFTO, thks :)
        for c_id in id_to_del:
            del self.checks[c_id]  # ZANKUSEN!
    def delete_zombie_actions(self):
        """Remove actions that have a zombie status (usually timeouts)

        :return: None
        """
        id_to_del = []
        for act in list(self.actions.values()):
            if act.status == ACT_STATUS_ZOMBIE:
                id_to_del.append(act.uuid)
        # A little pat on the back and away you go, thank you...
        # *pat pat* GFTO, thks :)
        for a_id in id_to_del:
            del self.actions[a_id]  # ZANKUSEN!

    def update_downtimes_and_comments(self):
        # pylint: disable=too-many-branches
        """Iter over all hosts and services::

        TODO: add some unit tests for the maintenance period feature.

        * Update downtime status (start / stop) regarding maintenance period
        * Register new comments in comments list

        :return: None
        """
        broks = []
        now = time.time()

        # Check maintenance periods
        for elt in self.all_my_hosts_and_services():
            if not elt.maintenance_period:
                continue

            if not elt.in_maintenance:
                # Not yet in maintenance - create an automatic downtime if the
                # maintenance period is currently valid
                timeperiod = self.timeperiods[elt.maintenance_period]
                if timeperiod.is_time_valid(now):
                    start_dt = timeperiod.get_next_valid_time_from_t(now)
                    end_dt = timeperiod.get_next_invalid_time_from_t(start_dt + 1) - 1
                    data = {
                        'ref': elt.uuid, 'ref_type': elt.my_type, 'start_time': start_dt,
                        'end_time': end_dt, 'fixed': 1, 'trigger_id': '',
                        'duration': 0, 'author': "Alignak",
                        'comment': "This downtime was automatically scheduled by Alignak "
                                   "because of a maintenance period."
                    }
                    downtime = Downtime(data)
                    self.add(downtime.add_automatic_comment(elt))
                    elt.add_downtime(downtime)
                    self.add(downtime)
                    self.get_and_register_status_brok(elt)
                    elt.in_maintenance = downtime.uuid
            else:
                if elt.in_maintenance not in elt.downtimes:
                    # the main downtimes has expired or was manually deleted
                    elt.in_maintenance = ''

        # Check the validity of contact downtimes
        for elt in self.contacts:
            for downtime_id in elt.downtimes:
                downtime = elt.downtimes[downtime_id]
                downtime.check_activation(self.contacts)

        # A loop where those downtimes are removed
        # which were marked for deletion (mostly by dt.exit())
        for elt in self.all_my_hosts_and_services():
            for downtime in list(elt.downtimes.values()):
                if not downtime.can_be_deleted:
                    continue

                logger.debug("Downtime to delete: %s", downtime.__dict__)
                elt.del_downtime(downtime.uuid)
                broks.append(elt.get_update_status_brok())

        # Same for contact downtimes:
        for elt in self.contacts:
            for downtime in list(elt.downtimes.values()):
                if not downtime.can_be_deleted:
                    continue
                elt.del_downtime(downtime.uuid)
                broks.append(elt.get_update_status_brok())

        # Check start and stop times
        for elt in self.all_my_hosts_and_services():
            for downtime in list(elt.downtimes.values()):
                if downtime.real_end_time < now:
                    # this one has expired
                    broks.extend(downtime.exit(self.timeperiods, self.hosts, self.services))
                elif now >= downtime.start_time and downtime.fixed and \
                        not downtime.is_in_effect:
                    # this one has to start now
                    broks.extend(downtime.enter(self.timeperiods, self.hosts, self.services))
                    broks.append(self.find_item_by_id(downtime.ref).get_update_status_brok())

        for brok in broks:
            self.add(brok)

    def schedule(self, elements=None):
        """Iterate over all hosts and services and call schedule method
        (schedule next check)

        If elements is None all our hosts and services are scheduled for a check.
:param elements: None or list of host / services to schedule :type elements: None | list :return: None """ if not elements: elements = self.all_my_hosts_and_services() # ask for service and hosts their next check for elt in elements: logger.debug("Add check for: %s", elt) self.add(elt.schedule(self.hosts, self.services, self.timeperiods, self.macromodulations, self.checkmodulations, self.checks)) def get_new_actions(self): """Call 'get_new_actions' hook point Iter over all hosts and services to add new actions in internal lists :return: None """ _t0 = time.time() self.hook_point('get_new_actions') statsmgr.timer('hook.get-new-actions', time.time() - _t0) # ask for service and hosts their next check for elt in self.all_my_hosts_and_services(): for action in elt.actions: logger.debug("Got a new action for %s: %s", elt, action) self.add(action) # We take all, we can clear it elt.actions = [] def get_new_broks(self): """Iter over all hosts and services to add new broks in internal lists :return: None """ # ask for service and hosts their broks waiting # be eaten for elt in self.all_my_hosts_and_services(): for brok in elt.broks: self.add(brok) # We got all, clear item broks list elt.broks = [] # Also fetch broks from contact (like contactdowntime) for contact in self.contacts: for brok in contact.broks: self.add(brok) # We got all, clear contact broks list contact.broks = [] def check_freshness(self): """ Iter over all hosts and services to check freshness if check_freshness enabled and passive_checks_enabled are set For the host items, the list of hosts to check contains hosts that: - have freshness check enabled - are not yet freshness expired - are only passively checked For the service items, the list of services to check contains services that: - do not depend upon an host that is freshness expired - have freshness check enabled - are not yet freshness expired - are only passively checked :return: None """ # Get tick count # (_, _, tick) = 
        # self.recurrent_works['check_freshness']
        _t0 = time.time()
        now = int(_t0)

        items = []

        # May be self.ticks is not set (unit tests context!)
        ticks = getattr(self, 'ticks', self.pushed_conf.host_freshness_check_interval)
        if self.pushed_conf.check_host_freshness \
                and ticks % self.pushed_conf.host_freshness_check_interval == 0:
            # Freshness check is configured for hosts - get the list of concerned hosts:
            # host check freshness is enabled and the host is only passively checked
            hosts = [h for h in self.hosts if h.check_freshness and not h.freshness_expired
                     and h.passive_checks_enabled and not h.active_checks_enabled]
            statsmgr.gauge('freshness.hosts-count', len(hosts))
            items.extend(hosts)
            logger.debug("Freshness check is enabled for %d hosts", len(hosts))

            hosts = [h for h in self.hosts if h.check_freshness and h.freshness_expired]
            logger.debug("Freshness still expired for %d hosts", len(hosts))
            for h in hosts:
                h.last_chk = now
                self.add(h.get_check_result_brok())
                # Update check output with last freshness check time
                h.output = "Freshness period expired: %s, last updated: %s" % (
                    datetime.utcfromtimestamp(h.last_hard_state_change).strftime(
                        "%Y-%m-%d %H:%M:%S %Z"),
                    datetime.utcfromtimestamp(h.last_chk).strftime(
                        "%Y-%m-%d %H:%M:%S %Z"))
                logger.debug("Freshness still expired: %s / %s", h.get_name(), h.output)

        # May be self.ticks is not set (unit tests context!)
        ticks = getattr(self, 'ticks', self.pushed_conf.service_freshness_check_interval)
        if self.pushed_conf.check_service_freshness \
                and ticks % self.pushed_conf.service_freshness_check_interval == 0:
            # Freshness check is configured for services - get the list of concerned services:
            # service check freshness is enabled and the service is only passively checked and
            # the depending host is not freshness expired
            services = [s for s in self.services if not self.hosts[s.host].freshness_expired
                        and s.check_freshness and not s.freshness_expired
                        and s.passive_checks_enabled and not s.active_checks_enabled]
            statsmgr.gauge('freshness.services-count', len(services))
            items.extend(services)
            logger.debug("Freshness check is enabled for %d services", len(services))

            services = [s for s in self.services if not self.hosts[s.host].freshness_expired
                        and s.check_freshness and s.freshness_expired]
            logger.debug("Freshness still expired for %d services", len(services))
            for s in services:
                s.last_chk = now
                self.add(s.get_check_result_brok())
                # Update check output with last freshness check time
                s.output = "Freshness period expired: %s, last updated: %s" % (
                    datetime.utcfromtimestamp(s.last_hard_state_change).strftime(
                        "%Y-%m-%d %H:%M:%S %Z"),
                    datetime.utcfromtimestamp(s.last_chk).strftime(
                        "%Y-%m-%d %H:%M:%S %Z"))
                logger.debug("Freshness still expired: %s / %s", s.get_full_name(), s.output)

        statsmgr.timer('freshness.items-list', time.time() - _t0)

        if not items:
            logger.debug("No freshness enabled item.")
            return

        # Raise a freshness check for every concerned item
        _t0 = time.time()
        raised_checks = 0
        for elt in items:
            chk = elt.do_check_freshness(self.hosts, self.services, self.timeperiods,
                                         self.macromodulations, self.checkmodulations,
                                         self.checks, _t0)
            if chk is not None:
                self.add(chk)
                self.waiting_results.put(chk)
                raised_checks += 1
        logger.info("Raised %d checks for freshness", raised_checks)
        statsmgr.gauge('freshness.raised-checks', raised_checks)
        statsmgr.timer('freshness.do-check', time.time() - _t0)

    def check_orphaned(self):
        """Check for
        orphaned checks/actions::

        * status == 'in_poller' and t_to_go < now - time_to_orphanage (300 by default)

        if so raise a warning log.

        :return: None
        """
        orphans_count = {}
        now = int(time.time())
        actions = list(self.checks.values()) + list(self.actions.values())
        for chk in actions:
            if chk.status not in [ACT_STATUS_POLLED]:
                continue

            # time_to_orphanage is item-dependent; 0/None disables orphanage detection
            time_to_orphanage = self.find_item_by_id(chk.ref).get_time_to_orphanage()
            if not time_to_orphanage:
                continue

            if chk.t_to_go > now - time_to_orphanage:
                continue

            logger.info("Orphaned %s (%d s / %s / %s) check for: %s (%s)",
                        chk.is_a, time_to_orphanage, chk.t_to_go, now,
                        self.find_item_by_id(chk.ref).get_full_name(), chk)
            # Re-schedule the orphaned check so it will be polled again
            chk._is_orphan = True
            chk.status = ACT_STATUS_SCHEDULED
            if chk.my_worker not in orphans_count:
                orphans_count[chk.my_worker] = 0
            orphans_count[chk.my_worker] += 1

        for sta_name in orphans_count:
            logger.warning("%d actions never came back for the satellite '%s'. "
                           "I reenable them for polling.", orphans_count[sta_name], sta_name)

    def send_broks_to_modules(self):
        """Put broks into module queues
        Only broks without sent_to_externals to True are sent
        Only modules that ask for broks will get some

        :return: None
        """
        t00 = time.time()
        nb_sent = 0

        # Collect the broks that were not yet sent to the external modules
        broks = []
        for broker_link in list(self.my_daemon.brokers.values()):
            for brok in broker_link.broks:
                if not getattr(brok, 'sent_to_externals', False):
                    brok.to_send = True
                    broks.append(brok)
        if not broks:
            return
        logger.debug("sending %d broks to modules...", len(broks))

        for mod in self.my_daemon.modules_manager.get_external_instances():
            logger.debug("Look for sending to module %s", mod.get_name())
            module_queue = mod.to_q
            if module_queue:
                # Each module only receives the broks it is interested in
                to_send = [b for b in broks if mod.want_brok(b)]
                module_queue.put(to_send)
                nb_sent += len(to_send)

        # No more need to send them
        for broker_link in list(self.my_daemon.brokers.values()):
            for brok in broker_link.broks:
                if not getattr(brok, 'sent_to_externals', False):
                    brok.to_send = False
                    brok.sent_to_externals = True
        logger.debug("Time to send %d broks (after %d secs)", nb_sent, time.time() - t00)

    def get_objects_from_from_queues(self):
        """Same behavior than Daemon.get_objects_from_from_queues().

        :return:
        :rtype:
        """
        return self.my_daemon.get_objects_from_from_queues()

    def get_scheduler_stats(self, details=False):
        # pylint: disable=unused-argument
        # pylint: disable=too-many-locals, too-many-branches
        """Get the scheduler statistics

        :param details: also include the problems list and per-command statistics
        :type details: bool
        :return: A dict with the following structure
        ::

           { 'modules': [
                         {'internal': {'name': "MYMODULE1", 'state': 'ok'},
                         {'external': {'name': "MYMODULE2", 'state': 'stopped'},
                        ]
             'latency':  {'avg': lat_avg, 'min': lat_min, 'max': lat_max}
             'hosts': len(self.hosts),
             'services': len(self.services),
             'commands': [{'cmd': c, 'u_time': u_time, 's_time': s_time}, ...] (10 first)
             'livesynthesis': {...}
           }

        :rtype: dict
        """
        m_solver = MacroResolver()

        res = {
            '_freshness': int(time.time()),
            'counters': {},
            'latency': self.stats['latency'],
            'monitored_objects': {},
            'livesynthesis': {}
        }

        checks_status_counts = self.get_checks_status_counts()

        # Checks / actions counters
        for what in (u'actions', u'checks'):
            res['counters']['%s.count' % what] = len(getattr(self, what))
            for status in (u'scheduled', u'in_poller', u'zombie'):
                res['counters']['%s.%s' % (what, status)] = checks_status_counts[status]

        if self.pushed_conf:
            for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()):
                # Internal statistics
                res['monitored_objects'][strclss] = len(getattr(self, strclss, []))

            # Scheduler live synthesis
            res['livesynthesis'] = {
                'hosts_total': m_solver._get_total_hosts(),
                'hosts_not_monitored': m_solver._get_total_hosts_not_monitored(),
                'hosts_up_hard': m_solver._get_total_hosts_up(u'HARD'),
                'hosts_up_soft': m_solver._get_total_hosts_up(u'SOFT'),
                'hosts_down_hard': m_solver._get_total_hosts_down(u'HARD'),
                'hosts_down_soft': m_solver._get_total_hosts_down(u'SOFT'),
                'hosts_unreachable_hard': m_solver._get_total_hosts_unreachable(u'HARD'),
                'hosts_unreachable_soft': m_solver._get_total_hosts_unreachable(u'SOFT'),
                'hosts_problems': m_solver._get_total_hosts_problems_unhandled(),
                'hosts_acknowledged': m_solver._get_total_hosts_problems_handled(),
                'hosts_in_downtime': m_solver._get_total_hosts_downtimed(),
                'hosts_flapping': m_solver._get_total_hosts_flapping(),

                'services_total': m_solver._get_total_services(),
                'services_not_monitored': m_solver._get_total_services_not_monitored(),
                'services_ok_hard': m_solver._get_total_services_ok(u'HARD'),
                'services_ok_soft': m_solver._get_total_services_ok(u'SOFT'),
                'services_warning_hard': m_solver._get_total_services_warning(u'HARD'),
                'services_warning_soft': m_solver._get_total_services_warning(u'SOFT'),
                'services_critical_hard': m_solver._get_total_services_critical(u'HARD'),
                'services_critical_soft': m_solver._get_total_services_critical(u'SOFT'),
                'services_unknown_hard': m_solver._get_total_services_unknown(u'HARD'),
                'services_unknown_soft': m_solver._get_total_services_unknown(u'SOFT'),
                'services_unreachable_hard': m_solver._get_total_services_unreachable(u'HARD'),
                'services_unreachable_soft': m_solver._get_total_services_unreachable(u'SOFT'),
                'services_problems': m_solver._get_total_services_problems_unhandled(),
                'services_acknowledged': m_solver._get_total_services_problems_handled(),
                'services_in_downtime': m_solver._get_total_services_downtimed(),
                'services_flapping': m_solver._get_total_services_flapping()
            }

            if details:
                # Hosts/services problems list - only unacknowledged HARD problems
                all_problems = {}
                for item in self.hosts:
                    if item.state_type not in [u'HARD'] or item.state not in ['DOWN']:
                        continue
                    if item.is_problem and not item.problem_has_been_acknowledged:
                        all_problems[item.uuid] = {
                            'host': item.get_name(),
                            'service': None,
                            'state': item.state,
                            'state_type': item.state_type,
                            'output': item.output,
                            'last_state': item.last_state,
                            'last_state_type': item.last_state_type,
                            'last_state_update': item.last_state_update,
                            'last_state_change': item.last_state_change,
                            'last_hard_state_change': item.last_hard_state_change,
                            'last_hard_state': item.last_hard_state,
                        }

                for item in self.services:
                    if item.state_type not in [u'HARD'] or \
                            item.state not in ['WARNING', 'CRITICAL']:
                        continue
                    if item.is_problem and not item.problem_has_been_acknowledged:
                        all_problems[item.uuid] = {
                            'host': item.host_name,
                            'service': item.get_name(),
                            'output': item.output,
                            'state': item.state,
                            'state_type': item.state_type,
                            'last_state': item.last_state,
                            'last_state_type': item.last_state_type,
                            'last_hard_state': item.last_hard_state,
                            'last_state_update': item.last_state_update,
                            'last_state_change': item.last_state_change,
                            'last_hard_state_change': item.last_hard_state_change,
                        }

                res['problems'] = all_problems

                all_commands = {}
                # Some checks statistics: user/system time, normalized by check interval
                for elt in self.all_my_hosts_and_services():
                    last_cmd = elt.last_check_command
                    if not last_cmd:
                        continue
                    cmd = os.path.split(last_cmd.split(' ', 1)[0])[1]
                    u_time = elt.u_time
                    s_time = elt.s_time
                    old_u_time, old_s_time = all_commands.get(cmd, (0.0, 0.0))
                    interval = elt.check_interval
                    if not interval:
                        interval = 1
                    old_u_time += u_time / interval
                    old_s_time += s_time / interval
                    all_commands[cmd] = (old_u_time, old_s_time)
                # Return all the commands
                res['commands'] = all_commands

        return res

    def get_latency_average_percentile(self):
        """
        Get a overview of the latencies with just a 95 percentile + min/max values

        :return: None
        """
        # NOTE(review): index 21 is the 'get_latency_average_percentile' entry of
        # recurrent_works - confirm against the recurrent works declaration
        (_, _, time_interval) = self.recurrent_works[21]
        last_time = time.time() - time_interval
        latencies = [s.latency for s in self.services if s.last_chk > last_time]
        lat_avg, lat_min, lat_max = average_percentile(latencies)
        if lat_avg is not None:
            self.stats['latency']['avg'] = lat_avg
            self.stats['latency']['min'] = lat_min
            self.stats['latency']['max'] = lat_max
            logger.debug("Latency (avg/min/max): %.2f/%.2f/%.2f",
                         lat_avg, lat_min, lat_max)

    def get_checks_status_counts(self, checks=None):
        """Compute the counts of the different checks status and
        return it as a defaultdict(int) with the keys being the different
        status and the values being the count of the checks in that status.

        :checks: None or the checks you want to count their statuses.
                 If None then self.checks is used.

        :param checks: NoneType | dict
        :type checks: None | dict
        :return: checks status count
        :rtype: defaultdict(int)
        """
        if checks is None:
            checks = self.checks

        res = defaultdict(int)
        res["total"] = len(checks)
        for chk in checks.values():
            res[chk.status] += 1
        return res

    def find_item_by_id(self, object_id):
        """Get item based on its id or uuid

        :param object_id: item identifier - may already be an Item instance
        :type object_id: str
        :return: the matching item, object_id itself if it is not a string,
                 or None when not found
        :rtype: alignak.objects.item.Item | None
        """
        # Item id may be an item
        if isinstance(object_id, Item):
            return object_id

        # Item id should be a uuid string
        if not isinstance(object_id, string_types):
            logger.debug("Find an item by id, object_id is not int nor string: %s",
                         object_id)
            return object_id

        # Search the uuid in every known items collection
        for items in [self.hosts, self.services, self.actions, self.checks,
                      self.hostgroups, self.servicegroups, self.contacts,
                      self.contactgroups]:
            if object_id in items:
                return items[object_id]

        # raise AttributeError("Item with id %s not found" % object_id)  # pragma: no cover,
        logger.error("Item with id %s not found", str(object_id))  # pragma: no cover,
        return None  # simple protection this should never happen

    def before_run(self):
        """Initialize the scheduling process"""
        # Actions and checks counters
        self.nb_checks = 0
        self.nb_internal_checks = 0
        self.nb_checks_launched = 0
        self.nb_actions_launched = 0

        self.nb_checks_results = 0
        self.nb_checks_results_timeout = 0
        self.nb_checks_results_passive = 0
        self.nb_checks_results_active = 0

        self.nb_actions_results = 0
        self.nb_actions_results_timeout = 0
        self.nb_actions_results_passive = 0

        self.nb_broks_dropped = 0
        self.nb_checks_dropped = 0
        self.nb_actions_dropped = 0

        # Broks, notifications, ...
counters self.nb_broks = 0 self.nb_notifications = 0 self.nb_event_handlers = 0 self.nb_external_commands = 0 self.ticks = 0 def after_run(self): """After the scheduling process""" # We must save the retention at the quit BY OURSELVES # because our daemon will not be able to do it for us self.update_retention() def run(self): # pylint: disable=too-many-locals, too-many-statements, too-many-branches """Main scheduler function:: * Load retention * Call 'pre_scheduler_mod_start' hook point * Start modules * Schedule first checks * Init connection with pollers/reactionners * Run main loop * Do recurrent works * Push/Get actions to passive satellites * Update stats * Call 'scheduler_tick' hook point * Save retention (on quit) :return: None """ if not self.must_schedule: logger.warning("#%d - scheduler is not active...", self.my_daemon.loop_count) return # Increment ticks count self.ticks += 1 loop_start_ts = time.time() # Do recurrent works like schedule, consume, delete_zombie_checks for i in self.recurrent_works: (name, fun, nb_ticks) = self.recurrent_works[i] # A 0 in the tick will just disable it if nb_ticks: if self.ticks % nb_ticks == 0: # Call it and save the time spend in it _t0 = time.time() fun() statsmgr.timer('loop.recurrent.%s' % name, time.time() - _t0) statsmgr.timer('loop.recurrent', time.time() - loop_start_ts) _ts = time.time() self.push_actions_to_passive_satellites() statsmgr.timer('loop.push_actions_to_passive_satellites', time.time() - _ts) _ts = time.time() self.get_results_from_passive_satellites() statsmgr.timer('loop.get_results_from_passive_satellites', time.time() - _ts) # Scheduler statistics # - broks / notifications counters if self.my_daemon.log_loop: logger.debug("Items (loop): broks: %d, notifications: %d, checks: %d, internal checks: " "%d, event handlers: %d, external commands: %d", self.nb_broks, self.nb_notifications, self.nb_checks, self.nb_internal_checks, self.nb_event_handlers, self.nb_external_commands) 
statsmgr.gauge('activity.checks', self.nb_checks) statsmgr.gauge('activity.internal_checks', self.nb_internal_checks) statsmgr.gauge('activity.launched_checks', self.nb_checks_launched) statsmgr.gauge('activity.checks_results', self.nb_checks_results) statsmgr.gauge('activity.checks_results_timeout', self.nb_checks_results_timeout) statsmgr.gauge('activity.checks_results_active', self.nb_checks_results_active) statsmgr.gauge('activity.checks_results_passive', self.nb_checks_results_passive) statsmgr.gauge('activity.launched_actions', self.nb_actions_launched) statsmgr.gauge('activity.actions_results', self.nb_actions_results) statsmgr.gauge('activity.actions_results_timeout', self.nb_actions_results_timeout) statsmgr.gauge('activity.broks', self.nb_broks) statsmgr.gauge('activity.external_commands', self.nb_external_commands) statsmgr.gauge('activity.notifications', self.nb_notifications) statsmgr.gauge('activity.event_handlers', self.nb_event_handlers) if self.my_daemon.need_dump_environment: _ts = time.time() logger.debug('I must dump my memory...') self.my_daemon.dump_environment() self.my_daemon.need_dump_environment = False statsmgr.timer('loop.memory_dump', time.time() - _ts) if self.my_daemon.need_objects_dump: _ts = time.time() logger.debug('I must dump my objects...') self.dump_objects() self.dump_config() self.my_daemon.need_objects_dump = False statsmgr.timer('loop.objects_dump', time.time() - _ts) _ts = time.time() self.hook_point('scheduler_tick') statsmgr.timer('loop.hook-tick', time.time() - _ts) if self.my_daemon.log_loop: elapsed_time = time.time() - self.my_daemon.start_time logger.debug("Check average (total) = %d checks results, %.2f checks/s", self.nb_checks, self.nb_checks / elapsed_time) if self.nb_checks_dropped > 0 \ or self.nb_broks_dropped > 0 or self.nb_actions_dropped > 0: logger.warning("We dropped %d checks, %d broks and %d actions", self.nb_checks_dropped, self.nb_broks_dropped, self.nb_actions_dropped) 
statsmgr.gauge('activity.broks_dropped', self.nb_broks_dropped) statsmgr.gauge('activity.checks_dropped', self.nb_checks_dropped) statsmgr.gauge('activity.actions_dropped', self.nb_actions_dropped) self.nb_checks_dropped = self.nb_broks_dropped = self.nb_actions_dropped = 0
class Scheduler(object): '''Scheduler class. Mostly handle scheduling items (host service) to schedule checks raise alerts, manage downtimes, etc.''' def __init__(self, scheduler_daemon): '''Receives the daemon this Scheduler is attached to :param scheduler_daemon: schedulerdaemon :type scheduler_daemon: alignak.daemons.schedulerdaemon.Alignak ''' pass @property def name(self): '''Get the scheduler name Indeed, we return our suffixed daemon name :return: :rtype: ''' pass def reset(self): '''Reset scheduler:: * Remove waiting results * Clear checks and actions lists :return: None ''' pass def all_my_hosts_and_services(self): '''Create an iterator for all my known hosts and services :return: None ''' pass def load_conf(self, instance_id, instance_name, conf): '''Load configuration received from Arbiter and pushed by our Scheduler daemon :param instance_name: scheduler instance name :type instance_name: str :param instance_id: scheduler instance id :type instance_id: str :param conf: configuration to load :type conf: alignak.objects.config.Config :return: None ''' pass def update_recurrent_works_tick(self, conf): '''Modify the tick value for the scheduler recurrent work A tick is an amount of loop of the scheduler before executing the recurrent work The provided configuration may contain some tick-function_name keys that contain a tick value to be updated. Those parameters are defined in the alignak environment file. Indeed this function is called with the Scheduler daemon object. Note that the ``conf`` parameter may also be a dictionary. 
:param conf: the daemon link configuration to search in :type conf: alignak.daemons.schedulerdaemon.Alignak :return: None ''' pass def start_scheduling(self): '''Set must_schedule attribute to True - enable the scheduling loop :return: None ''' pass def stop_scheduling(self): '''Set must_schedule attribute to False - disable the scheduling loop :return: None ''' pass def dump_objects(self): '''Dump scheduler objects into a dump (temp) file :return: None ''' pass def dump_config(self): '''Dump scheduler configuration into a temporary file The dumped content is JSON formatted :return: None ''' pass def run_external_commands(self, cmds): '''Run external commands Arbiter/Receiver sent :param cmds: commands to run :type cmds: list :return: None ''' pass def add_brok(self, brok, broker_uuid=None): '''Add a brok into brokers list It can be for a specific one, all brokers or none (startup) :param brok: brok to add :type brok: alignak.brok.Brok :param broker_uuid: broker uuid for the brok :type broker_uuid: str :return: None ''' pass def add_notification(self, notification): '''Add a notification into actions list :param notification: notification to add :type notification: alignak.notification.Notification :return: None ''' pass def add_check(self, check): '''Add a check into the scheduler checks list :param check: check to add :type check: alignak.check.Check :return: None ''' pass def add_event_handler(self, action): '''Add a event handler into actions list :param action: event handler to add :type action: alignak.eventhandler.EventHandler :return: None ''' pass def add_external_command(self, ext_cmd): '''Resolve external command :param ext_cmd: extermal command to run :type ext_cmd: alignak.external_command.ExternalCommand :return: None ''' pass def add_brok(self, brok, broker_uuid=None): '''Generic function to add objects into the scheduler daemon internal lists:: Brok -> self.broks Check -> self.checks Notification -> self.actions EventHandler -> self.actions For an 
ExternalCommand, tries to resolve the command :param elt: element to add :type elt: :return: None ''' pass def hook_point(self, hook_name): '''Generic function to call modules methods if such method is avalaible :param hook_name: function name to call :type hook_name: str :return:None ''' pass def clean_queues(self): '''Reduces internal list size to max allowed * checks and broks : 5 * length of hosts + services * actions : 5 * length of hosts + services + contacts :return: None ''' pass def clean_caches(self): '''Clean timperiods caches :return: None ''' pass def get_and_register_status_brok(self, item): '''Get a update status brok for item and add it :param item: item to get brok from :type item: alignak.objects.item.Item :return: None ''' pass def get_and_register_check_result_brok(self, item): '''Get a check result brok for item and add it :param item: item to get brok from :type item: alignak.objects.schedulingitem.SchedulingItem :return: None ''' pass def check_for_expire_acknowledge(self): '''Iter over host and service and check if any acknowledgement has expired :return: None ''' pass def update_business_values(self): '''Iter over host and service and update business_impact :return: None ''' pass def scatter_master_notifications(self): '''Generate children notifications from a master notification Also update notification number Master notification are raised when a notification must be sent out. They are not launched by reactionners (only children are) but they are used to build the children notifications. From one master notification, several children notifications may be built, indeed one per each contact... 
:return: None ''' pass def get_to_run_checks(self, do_checks=False, do_actions=False, poller_tags=None, reactionner_tags=None, worker_name='none', module_types=None): '''Get actions/checks for reactionner/poller Called by the poller to get checks (do_checks=True) and by the reactionner (do_actions=True) to get actions :param do_checks: do we get checks ? :type do_checks: bool :param do_actions: do we get actions ? :type do_actions: bool :param poller_tags: poller tags to filter :type poller_tags: list :param reactionner_tags: reactionner tags to filter :type reactionner_tags: list :param worker_name: worker name to fill check/action (to remember it) :type worker_name: str :param module_types: module type to filter :type module_types: list :return: Check/Action list with poller/reactionner tags matching and module type matching :rtype: list ''' pass def manage_results(self, action): '''Get result from pollers/reactionners (actives ones) :param action: check / action / event handler to handle :type action: :return: None ''' pass def push_actions_to_passive_satellites(self): '''Send actions/checks to passive poller/reactionners :return: None ''' pass def get_results_from_passive_satellites(self): '''Get actions/checks results from passive poller/reactionners :return: None ''' pass def manage_internal_checks(self): '''Run internal checks :return: None ''' pass def reset_topology_change_flag(self): '''Set topology_change attribute to False in all hosts and services :return: None ''' pass def update_retention(self): '''Call hook point 'save_retention'. Retention modules will write back retention (to file, db etc) :param forced: is update forced? :type forced: bool :return: None ''' pass def retention_load(self, forced=False): '''Call hook point 'load_retention'. Retention modules will read retention (from file, db etc) :param forced: is load forced? 
:type forced: bool :return: None ''' pass def log_initial_states(self): '''Raise hosts and services initial status logs First, raise hosts status and then services. This to allow the events log to be a little sorted. :return: None ''' pass def get_retention_data(self): '''Get all hosts and services data to be sent to the retention storage. This function only prepares the data because a module is in charge of making the data survive to the scheduler restart. todo: Alignak scheduler creates two separate dictionaries: hosts and services It would be better to merge the services into the host dictionary! :return: dict containing host and service data :rtype: dict ''' pass def restore_retention_data(self, data): '''Restore retention data Data coming from retention will override data coming from configuration It is kinda confusing when you modify an attribute (external command) and it get saved by retention :param data: data from retention :type data: dict :return: None ''' pass def restore_retention_data_item(self, data, item): ''' Restore data in item :param data: retention data of the item :type data: dict :param item: host or service item :type item: alignak.objects.host.Host | alignak.objects.service.Service :return: None ''' pass def fill_initial_broks(self, broker_name): '''Create initial broks for a specific broker :param broker_name: broker name :type broker_name: str :return: number of created broks ''' pass def initial_program_status(self): '''Create and add a program_status brok :return: None ''' pass def update_program_status(self): '''Create and add a update_program_status brok :return: None ''' pass def get_program_status_brok(self, brok_type='program_status'): '''Create a program status brok Initially builds the running properties and then, if initial status brok, get the properties from the Config class where an entry exist for the brok 'full_status' :return: Brok with program status data :rtype: alignak.brok.Brok ''' pass def consume_results(self): 
'''Handle results waiting in waiting_results list. Check ref will call consume result and update their status :return: None ''' pass def delete_zombie_checks(self): '''Remove checks that have a zombie status (usually timeouts) :return: None ''' pass def delete_zombie_actions(self): '''Remove actions that have a zombie status (usually timeouts) :return: None ''' pass def update_downtimes_and_comments(self): '''Iter over all hosts and services:: TODO: add some unit tests for the maintenance period feature. * Update downtime status (start / stop) regarding maintenance period * Register new comments in comments list :return: None ''' pass def schedule(self, elements=None): '''Iterate over all hosts and services and call schedule method (schedule next check) If elements is None all our hosts and services are scheduled for a check. :param elements: None or list of host / services to schedule :type elements: None | list :return: None ''' pass def get_new_actions(self): '''Call 'get_new_actions' hook point Iter over all hosts and services to add new actions in internal lists :return: None ''' pass def get_new_broks(self): '''Iter over all hosts and services to add new broks in internal lists :return: None ''' pass def check_freshness(self): ''' Iter over all hosts and services to check freshness if check_freshness enabled and passive_checks_enabled are set For the host items, the list of hosts to check contains hosts that: - have freshness check enabled - are not yet freshness expired - are only passively checked For the service items, the list of services to check contains services that: - do not depend upon an host that is freshness expired - have freshness check enabled - are not yet freshness expired - are only passively checked :return: None ''' pass def check_orphaned(self): '''Check for orphaned checks/actions:: * status == 'in_poller' and t_to_go < now - time_to_orphanage (300 by default) if so raise a warning log. 
:return: None ''' pass def send_broks_to_modules(self): '''Put broks into module queues Only broks without sent_to_externals to True are sent Only modules that ask for broks will get some :return: None ''' pass def get_objects_from_from_queues(self): '''Same behavior than Daemon.get_objects_from_from_queues(). :return: :rtype: ''' pass def get_scheduler_stats(self, details=False): '''Get the scheduler statistics :return: A dict with the following structure :: { 'modules': [ {'internal': {'name': "MYMODULE1", 'state': 'ok'}, {'external': {'name': "MYMODULE2", 'state': 'stopped'}, ] 'latency': {'avg': lat_avg, 'min': lat_min, 'max': lat_max} 'hosts': len(self.hosts), 'services': len(self.services), 'commands': [{'cmd': c, 'u_time': u_time, 's_time': s_time}, ...] (10 first) 'livesynthesis': {...} } :rtype: dict ''' pass def get_latency_average_percentile(self): ''' Get a overview of the latencies with just a 95 percentile + min/max values :return: None ''' pass def get_checks_status_counts(self, checks=None): ''' Compute the counts of the different checks status and return it as a defaultdict(int) with the keys being the different status and the values being the count of the checks in that status. :checks: None or the checks you want to count their statuses. If None then self.checks is used. 
:param checks: NoneType | dict :type checks: None | dict :return: :rtype: defaultdict(int) ''' pass def find_item_by_id(self, object_id): '''Get item based on its id or uuid :param object_id: :type object_id: str :return: :rtype: alignak.objects.item.Item | None ''' pass def before_run(self): '''Initialize the scheduling process''' pass def after_run(self): '''After the scheduling process''' pass def run_external_commands(self, cmds): '''Main scheduler function:: * Load retention * Call 'pre_scheduler_mod_start' hook point * Start modules * Schedule first checks * Init connection with pollers/reactionners * Run main loop * Do recurrent works * Push/Get actions to passive satellites * Update stats * Call 'scheduler_tick' hook point * Save retention (on quit) :return: None ''' pass
61
60
37
5
23
10
6
0.46
1
26
11
0
59
36
59
59
2,277
348
1,339
312
1,276
613
1,068
305
1,008
33
1
5
342
3,933
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/satellite.py
alignak.satellite.Satellite
class Satellite(BaseSatellite): # pylint: disable=too-many-instance-attributes """Satellite class. Sub-classed by Receiver, Reactionner and Poller """ do_checks = False do_actions = False my_type = '' properties = BaseSatellite.properties.copy() properties.update({ 'passive': BoolProp(default=False), 'max_plugins_output_length': IntegerProp(default=8192), 'min_workers': IntegerProp(default=0, fill_brok=[FULL_STATUS], to_send=True), 'max_workers': IntegerProp(default=0, fill_brok=[FULL_STATUS], to_send=True), 'processes_by_worker': IntegerProp(default=256, fill_brok=[FULL_STATUS], to_send=True), 'worker_polling_interval': IntegerProp(default=1, to_send=True), 'poller_tags': ListProp(default=['None'], to_send=True), 'reactionner_tags': ListProp(default=['None'], to_send=True), }) def __init__(self, name, **kwargs): super(Satellite, self).__init__(name, **kwargs) # Move these properties to the base Daemon ? # todo: change this? # Keep broks so they can be eaten by a broker self.broks = [] self.broks_lock = threading.RLock() # My active workers self.workers = {} # May be we are a passive daemon if self.passive: self.pre_log.append(("INFO", "Passive mode enabled.")) # Our tags # ['None'] is the default tags if self.type in ['poller'] and self.poller_tags: self.pre_log.append(("INFO", "Poller tags: %s" % self.poller_tags)) if self.type in ['reactionner'] and self.reactionner_tags: self.pre_log.append(("INFO", "Reactionner tags: %s" % self.reactionner_tags)) # Now the limit part, 0 means the number of cpu of this machine :) cpu_count = psutil.cpu_count() # Do not use the logger in this function because it is not yet initialized... 
self.pre_log.append(("INFO", "Detected %d CPUs" % cpu_count)) if self.max_workers == 0: try: # Preserve one CPU if more than one detected self.max_workers = max(cpu_count - 1, 1) except NotImplementedError: # pragma: no cover, simple protection self.max_workers = 1 if self.min_workers == 0: try: self.min_workers = max(cpu_count - 1, 1) except NotImplementedError: # pragma: no cover, simple protection self.min_workers = 1 self.pre_log.append(("INFO", "Using minimum %d workers, maximum %d workers, %d processes/worker" % (self.min_workers, self.max_workers, self.processes_by_worker))) self.slave_q = None self.returns_queue = None self.q_by_mod = {} # Modules are only loaded one time self.have_modules = False # round robin queue ic self.rr_qid = 0 def manage_action_return(self, action): """Manage action return from Workers We just put them into the corresponding sched and we clean unused properties like my_scheduler :param action: the action to manage :type action: alignak.action.Action :return: None """ # Maybe our workers send us something else than an action # if so, just add this in other queues and return # todo: test a class instance if action.__class__.my_type not in ['check', 'notification', 'eventhandler']: self.add(action) return # Ok, it's a result. 
Get the concerned scheduler uuid scheduler_uuid = action.my_scheduler logger.debug("Got action return: %s / %s", scheduler_uuid, action.uuid) try: # Now that we know where to put the action result, we do not need any reference to # the scheduler nor the worker del action.my_scheduler del action.my_worker except AttributeError: # pragma: no cover, simple protection logger.error("AttributeError Got action return: %s / %s", scheduler_uuid, action) # And we remove it from the actions queue of the scheduler too try: del self.schedulers[scheduler_uuid].actions[action.uuid] except KeyError as exp: logger.error("KeyError del scheduler action: %s / %s - %s", scheduler_uuid, action.uuid, str(exp)) # We tag it as "return wanted", and move it in the wait return queue try: self.schedulers[scheduler_uuid].wait_homerun[action.uuid] = action except KeyError as exp: # pragma: no cover, simple protection logger.error("KeyError Add home run action: %s / %s - %s", scheduler_uuid, action.uuid, str(exp)) def push_results(self): """Push the checks/actions results to our schedulers :return: None """ # For all schedulers, we check for wait_homerun # and we send back results for scheduler_link_uuid in self.schedulers: scheduler_link = self.schedulers[scheduler_link_uuid] if not scheduler_link.active: logger.warning("My scheduler '%s' is not active currently", scheduler_link.name) continue if not scheduler_link.wait_homerun: # Nothing to push back... continue # NB: it's **mostly** safe for us to not use some lock around # this 'results' / sched['wait_homerun']. # Because it can only be modified (for adding new values) by the # same thread running this function (that is the main satellite # thread), and this occurs exactly in self.manage_action_return(). # Another possibility is for the sched['wait_homerun'] to be # cleared within/by : # ISchedulers.get_results() -> Satelitte.get_return_for_passive() # This can so happen in an (http) client thread. 
results = scheduler_link.wait_homerun logger.debug("Pushing %d results to '%s'", len(results), scheduler_link.name) # So, at worst, some results would be received twice on the # scheduler level, which shouldn't be a problem given they are # indexed by their "action_id". scheduler_link.push_results(list(results.values()), self.name) results.clear() def create_and_launch_worker(self, module_name='fork'): """Create and launch a new worker, and put it into self.workers It can be mortal or not :param module_name: the module name related to the worker default is "fork" for no module Indeed, it is actually the module 'python_name' :type module_name: str :return: None """ logger.info("Allocating new '%s' worker...", module_name) # If we are in the fork module, we do not specify a target target = None __warned = [] if module_name == 'fork': target = None else: for module in self.modules_manager.instances: # First, see if the module name matches... if module.get_name() == module_name: # ... and then if is a 'worker' module one or not if not module.properties.get('worker_capable', False): raise NotWorkerMod target = module.work if target is None: if module_name not in __warned: logger.warning("No target found for %s, NOT creating a worker for it...", module_name) __warned.append(module_name) return # We give to the Worker the instance name of the daemon (eg. poller-master) # and not the daemon type (poller) queue = Queue() worker = Worker(module_name, queue, self.returns_queue, self.processes_by_worker, max_plugins_output_length=self.max_plugins_output_length, target=target, loaded_into=self.name) # worker.module_name = module_name # save this worker self.workers[worker.get_id()] = worker # And save the Queue of this worker, with key = worker id # self.q_by_mod[module_name][worker.uuid] = queue self.q_by_mod[module_name][worker.get_id()] = queue # Ok, all is good. Start it! 
worker.start() logger.info("Started '%s' worker: %s (pid=%d)", module_name, worker.get_id(), worker.get_pid()) def do_stop_workers(self): """Stop all workers :return: None """ logger.info("Stopping all workers (%d)", len(self.workers)) for worker in list(self.workers.values()): try: logger.info(" - stopping '%s'", worker.get_id()) worker.terminate() worker.join(timeout=1) logger.info(" - stopped") # A already dead worker or in a worker except (AttributeError, AssertionError): pass except Exception as exp: # pylint: disable=broad-except logger.error("exception: %s", str(exp)) def do_stop(self): """Stop my workers and stop :return: None """ self.do_stop_workers() super(Satellite, self).do_stop() def add(self, elt): """Generic function to add objects to the daemon internal lists. Manage Broks, External commands :param elt: object to add :type elt: alignak.AlignakObject :return: None """ if isinstance(elt, Brok): # For brok, we tag the brok with our instance_id elt.instance_id = self.instance_id if elt.type == 'monitoring_log': # The brok is a monitoring event with self.events_lock: self.events.append(elt) statsmgr.counter('events', 1) # Also add to our broks with self.broks_lock: self.broks.append(elt) statsmgr.counter('broks.added', 1) elif isinstance(elt, ExternalCommand): logger.debug("Queuing an external command '%s'", str(elt.__dict__)) with self.external_commands_lock: self.external_commands.append(elt) statsmgr.counter('external-commands.added', 1) def give_broks(self): """Get brok list from satellite :return: A copy of the broks list :rtype: list """ res = copy.deepcopy(self.broks) del self.broks[:] return res def check_and_del_zombie_workers(self): # pragma: no cover, not with unit tests... # pylint: disable= not-callable """Check if worker are fine and kill them if not. Dispatch the actions in the worker to another one TODO: see if unit tests would allow to check this code? 
:return: None """ # Active children make a join with everyone, useful :) # active_children() for p in active_children(): logger.debug("got child: %s", p) w_to_del = [] for worker in list(self.workers.values()): # If a worker goes down and we did not ask him, it's not # good: we can think that we have a worker and it's not True # So we del it logger.debug("checking if worker %s (pid=%d) is alive", worker.get_id(), worker.get_pid()) if not self.interrupted and not worker.is_alive(): logger.warning("The worker %s (pid=%d) went down unexpectedly!", worker.get_id(), worker.get_pid()) # Terminate immediately worker.terminate() worker.join(timeout=1) w_to_del.append(worker.get_id()) # OK, now really del workers from queues # And requeue the actions it was managed for worker_id in w_to_del: worker = self.workers[worker_id] # Del the queue of the module queue del self.q_by_mod[worker.module_name][worker.get_id()] for scheduler_uuid in self.schedulers: sched = self.schedulers[scheduler_uuid] for act in list(sched.actions.values()): if act.status == ACT_STATUS_QUEUED and act.my_worker == worker_id: # Got a check that will NEVER return if we do not restart it self.assign_to_a_queue(act) # So now we can really forgot it del self.workers[worker_id] def adjust_worker_number_by_load(self): """Try to create the minimum workers specified in the configuration :return: None """ if self.interrupted: logger.debug("Trying to adjust worker number. Ignoring because we are stopping.") return to_del = [] logger.debug("checking worker count." 
" Currently: %d workers, min per module : %d, max per module : %d", len(self.workers), self.min_workers, self.max_workers) # I want at least min_workers by module then if I can, I add worker for load balancing for mod in self.q_by_mod: # At least min_workers todo = max(0, self.min_workers - len(self.q_by_mod[mod])) for _ in range(todo): try: self.create_and_launch_worker(module_name=mod) except NotWorkerMod: # Maybe this module is not a true worker module. # if so, just delete if from q_by_mod to_del.append(mod) break for mod in to_del: logger.warning("The module %s is not a worker one, I remove it from the worker list.", mod) del self.q_by_mod[mod] # TODO: if len(workers) > 2*wish, maybe we can kill a worker? def _get_queue_for_the_action(self, action): """Find action queue for the action depending on the module. The id is found with action modulo on action id :param a: the action that need action queue to be assigned :type action: object :return: worker id and queue. (0, None) if no queue for the module_type :rtype: tuple """ # get the module name, if not, take fork mod = getattr(action, 'module_type', 'fork') queues = list(self.q_by_mod[mod].items()) # Maybe there is no more queue, it's very bad! 
if not queues: return (0, None) # if not get action round robin index to get action queue based # on the action id self.rr_qid = (self.rr_qid + 1) % len(queues) (worker_id, queue) = queues[self.rr_qid] # return the id of the worker (i), and its queue return (worker_id, queue) def add_actions(self, actions_list, scheduler_instance_id): """Add a list of actions to the satellite queues :param actions_list: Actions list to add :type actions_list: list :param scheduler_instance_id: sheduler link to assign the actions to :type scheduler_instance_id: SchedulerLink :return: None """ # We check for new check in each schedulers and put the result in new_checks scheduler_link = None for scheduler_id in self.schedulers: logger.debug("Trying to add an action, scheduler: %s", self.schedulers[scheduler_id]) if scheduler_instance_id == self.schedulers[scheduler_id].instance_id: scheduler_link = self.schedulers[scheduler_id] break else: logger.error("Trying to add actions from an unknown scheduler: %s", scheduler_instance_id) return if not scheduler_link: logger.error("Trying to add actions, but scheduler link is not found for: %s, " "actions: %s", scheduler_instance_id, actions_list) return logger.debug("Found scheduler link: %s", scheduler_link) for action in actions_list: # First we look if the action is identified uuid = getattr(action, 'uuid', None) if uuid is None: try: action = unserialize(action, no_json=True) uuid = action.uuid except AlignakClassLookupException: logger.error('Cannot un-serialize action: %s', action) continue # If we already have this action, we are already working for it! 
if uuid in scheduler_link.actions: continue # Action is attached to a scheduler action.my_scheduler = scheduler_link.uuid scheduler_link.actions[action.uuid] = action self.assign_to_a_queue(action) def assign_to_a_queue(self, action): """Take an action and put it to a worker actions queue :param action: action to put :type action: alignak.action.Action :return: None """ (worker_id, queue) = self._get_queue_for_the_action(action) if not worker_id: return # Tag the action as "in the worker i" action.my_worker = worker_id action.status = ACT_STATUS_QUEUED msg = Message(_type='Do', data=action, source=self.name) logger.debug("Queuing message: %s", msg) queue.put_nowait(msg) logger.debug("Queued") def get_new_actions(self): """ Wrapper function for do_get_new_actions For stats purpose :return: None TODO: Use a decorator for timing this function """ try: _t0 = time.time() self.do_get_new_actions() statsmgr.timer('actions.got.time', time.time() - _t0) except RuntimeError: logger.error("Exception like issue #1007") def do_get_new_actions(self): """Get new actions from schedulers Create a Message and put into the module queue REF: doc/alignak-action-queues.png (1) :return: None """ # Here are the differences between a poller and a reactionner: # Poller will only do checks, # Reactionner will do actions (notifications and event handlers) do_checks = self.__class__.do_checks do_actions = self.__class__.do_actions # We check and get the new actions to execute in each of our schedulers for scheduler_link_uuid in self.schedulers: scheduler_link = self.schedulers[scheduler_link_uuid] if not scheduler_link.active: logger.warning("My scheduler '%s' is not active currently", scheduler_link.name) continue logger.debug("get new actions, scheduler: %s", scheduler_link.name) # OK, go for it :) _t0 = time.time() actions = scheduler_link.get_actions({'do_checks': do_checks, 'do_actions': do_actions, 'poller_tags': self.poller_tags, 'reactionner_tags': self.reactionner_tags, 'worker_name': 
self.name, 'module_types': list(self.q_by_mod.keys())}) if actions: logger.debug("Got %d actions from %s", len(actions), scheduler_link.name) # We 'tag' them with my_scheduler and put into queue for workers self.add_actions(actions, scheduler_link.instance_id) logger.debug("Got %d actions from %s in %s", len(actions), scheduler_link.name, time.time() - _t0) statsmgr.gauge('actions.added.count.%s' % (scheduler_link.name), len(actions)) def clean_previous_run(self): """Clean variables from previous configuration, such as schedulers, broks and external commands :return: None """ # Execute the base class treatment... super(Satellite, self).clean_previous_run() # Clean my lists del self.broks[:] del self.events[:] def do_loop_turn(self): # pylint: disable=too-many-branches """Satellite main loop:: * Check and delete zombies actions / modules * Get returns from queues * Adjust worker number * Get new actions :return: None """ # Try to see if one of my module is dead, and restart previously dead modules self.check_and_del_zombie_modules() # Also if some zombie workers exist... self.check_and_del_zombie_workers() # Call modules that manage a starting tick pass self.hook_point('tick') # Print stats for debug for _, sched in self.schedulers.items(): for mod in self.q_by_mod: # In workers we've got actions sent to queue - queue size for (worker_id, queue) in list(self.q_by_mod[mod].items()): try: actions_count = queue.qsize() results_count = self.returns_queue.qsize() logger.debug("[%s][%s][%s] actions queued: %d, results queued: %d", sched.name, mod, worker_id, actions_count, results_count) # Update the statistics statsmgr.gauge('worker.%s.actions-queue-size' % worker_id, actions_count) statsmgr.gauge('worker.%s.results-queue-size' % worker_id, results_count) except (IOError, EOFError): pass # todo temporaray deactivate all this stuff! # Before return or get new actions, see how we managed # the former ones: are they still in queue(s)? 
If so, we # must wait more or at least have more workers # wait_ratio = self.wait_ratio.get_load() # total_q = 0 # try: # for mod in self.q_by_mod: # for queue in list(self.q_by_mod[mod].values()): # total_q += queue.qsize() # except (IOError, EOFError): # pass # if total_q != 0 and wait_ratio < 2 * self.worker_polling_interval: # logger.debug("I decide to increase the wait ratio") # self.wait_ratio.update_load(wait_ratio * 2) # # self.wait_ratio.update_load(self.worker_polling_interval) # else: # # Go to self.worker_polling_interval on normal run, if wait_ratio # # was >2*self.worker_polling_interval, # # it make it come near 2 because if < 2, go up :) # self.wait_ratio.update_load(self.worker_polling_interval) # wait_ratio = self.wait_ratio.get_load() # statsmgr.timer('core.wait-ratio', wait_ratio) # if self.log_loop: # logger.debug("[%s] wait ratio: %f", self.name, wait_ratio) # Maybe we do not have enough workers, we check for it # and launch the new ones if needed self.adjust_worker_number_by_load() # Manage all messages we've got in the last timeout # for queue in self.return_messages: try: logger.debug("[%s] manage action results: %d results", self.name, self.returns_queue.qsize()) while self.returns_queue.qsize(): msg = self.returns_queue.get_nowait() if msg is None: continue if not isinstance(msg, Message): logger.warning("Should have received a Message, got a %s!", type(msg)) continue logger.debug("Got a message: %s", msg) if msg.get_type() == 'Done': logger.debug("Got (from %s) an action result: %s", msg.get_source(), msg.get_data()) self.manage_action_return(msg.get_data()) elif msg.get_type() == 'Stats': logger.debug("Got (from %s) stats: %s", msg.get_source(), msg.get_data()) if msg.get_source() in self.workers: self.workers[msg.get_source()].stats = msg.get_data() else: logger.warning("Ignoring message of type: %s", msg.get_type()) except Full: logger.warning("Returns queue is full") except Empty: logger.debug("Returns queue is empty") except 
(IOError, EOFError) as exp: logger.warning("My returns queue is no more available: %s", str(exp)) except Exception as exp: # pylint: disable=broad-except logger.error("Failed getting messages in returns queue: %s", str(exp)) logger.error(traceback.format_exc()) for _, sched in self.schedulers.items(): if sched.wait_homerun: logger.debug("scheduler home run: %d results", len(sched.wait_homerun)) if not self.passive: # If we are an active satellite, we do not initiate the check getting # and return try: # We send to our schedulers the results of all finished checks logger.debug("pushing results...") self.push_results() except LinkError as exp: logger.warning("Scheduler connection failed, I could not send my results!") try: # And we get the new actions from our schedulers logger.debug("getting new actions...") self.get_new_actions() except LinkError as exp: logger.warning("Scheduler connection failed, I could not get new actions!") # Get objects from our modules that are not Worker based if self.log_loop: logger.debug("[%s] get objects from queues", self.name) self.get_objects_from_from_queues() statsmgr.gauge('external-commands.count', len(self.external_commands)) statsmgr.gauge('broks.count', len(self.broks)) statsmgr.gauge('events.count', len(self.events)) def do_post_daemon_init(self): """Do this satellite (poller or reactionner) post "daemonize" init :return: None """ # We can open the Queue for fork AFTER self.q_by_mod['fork'] = {} # todo: check if this is always useful? self.returns_queue = Queue() def setup_new_conf(self): # pylint: disable=too-many-branches """Setup the new configuration received from Arbiter This function calls the base satellite treatment and manages the configuration needed for a simple satellite daemon that executes some actions (eg. poller or reactionner): - configure the passive mode - configure the workers - configure the tags - configure the modules :return: None """ # Execute the base class treatment... 
super(Satellite, self).setup_new_conf() # ...then our own specific treatment! with self.conf_lock: logger.info("Received a new configuration") # self_conf is our own configuration from the alignak environment # self_conf = self.cur_conf['self_conf'] # Now manage modules if not self.have_modules: try: self.modules = unserialize(self.cur_conf['modules'], no_json=True) except AlignakClassLookupException as exp: # pragma: no cover, simple protection logger.error('Cannot un-serialize modules configuration ' 'received from arbiter: %s', exp) if self.modules: logger.info("I received some modules configuration") self.have_modules = True for module in self.modules: if module.name not in self.q_by_mod: self.q_by_mod[module.name] = {} self.do_load_modules(self.modules) # and start external modules too self.modules_manager.start_external_instances() else: logger.info("I do not have modules") # Initialize connection with all our satellites logger.info("Initializing connection with my satellites:") my_satellites = self.get_links_of_type(s_type='') for satellite in list(my_satellites.values()): logger.info("- : %s/%s", satellite.type, satellite.name) if not self.daemon_connection_init(satellite): logger.error("Satellite connection failed: %s", satellite) # Now I have a configuration! self.have_conf = True def get_daemon_stats(self, details=False): """Increase the stats provided by the Daemon base class :return: stats dictionary :rtype: dict """ # call the daemon one res = super(Satellite, self).get_daemon_stats(details=details) counters = res['counters'] counters['broks'] = len(self.broks) counters['events'] = len(self.events) counters['workers'] = len(self.workers) if self.workers: res['workers'] = {} for worker in list(self.workers.values()): stats = getattr(self.workers[worker.get_id()], 'stats', None) if stats: res['workers'][worker.get_id()] = stats return res def main(self): """Main satellite function. 
Do init and then mainloop :return: None """ try: # Start the daemon mode if not self.do_daemon_init_and_start(): self.exit_on_error(message="Daemon initialization error", exit_code=3) self.do_post_daemon_init() # We wait for initial conf self.wait_for_initial_conf() if self.new_conf: # Setup the received configuration self.setup_new_conf() # Allocate Mortal Threads self.adjust_worker_number_by_load() # Now main loop self.do_main_loop() logger.info("Exited from the main loop.") self.request_stop() except Exception: # pragma: no cover, this should never happen indeed ;) self.exit_on_exception(traceback.format_exc()) raise
class Satellite(BaseSatellite): '''Satellite class. Sub-classed by Receiver, Reactionner and Poller ''' def __init__(self, name, **kwargs): pass def manage_action_return(self, action): '''Manage action return from Workers We just put them into the corresponding sched and we clean unused properties like my_scheduler :param action: the action to manage :type action: alignak.action.Action :return: None ''' pass def push_results(self): '''Push the checks/actions results to our schedulers :return: None ''' pass def create_and_launch_worker(self, module_name='fork'): '''Create and launch a new worker, and put it into self.workers It can be mortal or not :param module_name: the module name related to the worker default is "fork" for no module Indeed, it is actually the module 'python_name' :type module_name: str :return: None ''' pass def do_stop_workers(self): '''Stop all workers :return: None ''' pass def do_stop_workers(self): '''Stop my workers and stop :return: None ''' pass def add(self, elt): '''Generic function to add objects to the daemon internal lists. Manage Broks, External commands :param elt: object to add :type elt: alignak.AlignakObject :return: None ''' pass def give_broks(self): '''Get brok list from satellite :return: A copy of the broks list :rtype: list ''' pass def check_and_del_zombie_workers(self): '''Check if worker are fine and kill them if not. Dispatch the actions in the worker to another one TODO: see if unit tests would allow to check this code? :return: None ''' pass def adjust_worker_number_by_load(self): '''Try to create the minimum workers specified in the configuration :return: None ''' pass def _get_queue_for_the_action(self, action): '''Find action queue for the action depending on the module. The id is found with action modulo on action id :param a: the action that need action queue to be assigned :type action: object :return: worker id and queue. 
(0, None) if no queue for the module_type :rtype: tuple ''' pass def add_actions(self, actions_list, scheduler_instance_id): '''Add a list of actions to the satellite queues :param actions_list: Actions list to add :type actions_list: list :param scheduler_instance_id: sheduler link to assign the actions to :type scheduler_instance_id: SchedulerLink :return: None ''' pass def assign_to_a_queue(self, action): '''Take an action and put it to a worker actions queue :param action: action to put :type action: alignak.action.Action :return: None ''' pass def get_new_actions(self): ''' Wrapper function for do_get_new_actions For stats purpose :return: None TODO: Use a decorator for timing this function ''' pass def do_get_new_actions(self): '''Get new actions from schedulers Create a Message and put into the module queue REF: doc/alignak-action-queues.png (1) :return: None ''' pass def clean_previous_run(self): '''Clean variables from previous configuration, such as schedulers, broks and external commands :return: None ''' pass def do_loop_turn(self): '''Satellite main loop:: * Check and delete zombies actions / modules * Get returns from queues * Adjust worker number * Get new actions :return: None ''' pass def do_post_daemon_init(self): '''Do this satellite (poller or reactionner) post "daemonize" init :return: None ''' pass def setup_new_conf(self): '''Setup the new configuration received from Arbiter This function calls the base satellite treatment and manages the configuration needed for a simple satellite daemon that executes some actions (eg. poller or reactionner): - configure the passive mode - configure the workers - configure the tags - configure the modules :return: None ''' pass def get_daemon_stats(self, details=False): '''Increase the stats provided by the Daemon base class :return: stats dictionary :rtype: dict ''' pass def main(self): '''Main satellite function. Do init and then mainloop :return: None ''' pass
22
21
34
4
18
12
5
0.64
1
20
7
3
21
15
21
82
758
114
399
96
377
257
349
90
327
21
3
5
105
3,934
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/satellite.py
alignak.satellite.NotWorkerMod
class NotWorkerMod(Exception): """Class to tell that we are facing a non worker module but a standard one """ pass
class NotWorkerMod(Exception): '''Class to tell that we are facing a non worker module but a standard one ''' pass
1
1
0
0
0
0
0
1.5
1
0
0
0
0
0
0
10
6
1
2
1
1
3
2
1
1
0
3
0
0
3,935
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/property.py
alignak.property.UnusedProp
class UnusedProp(Property): """A unused Property. These are typically used by Nagios but no longer useful/used by Alignak. This is just to warn the user that the option he uses is no more used in Alignak. """ def __init__(self, text=None): """Create a new Unused property Since this property is not used, there is no use for other parameters than 'text'. 'text' a some usage text if present, will print it to explain why it's no more useful :param text: :type text: None | str :return: None """ super(UnusedProp, self).__init__(default=NONE_OBJECT, class_inherit=[], managed=True) if text is None: text = "This parameter is no longer useful in the Alignak architecture." self.text = text self.unused = True
class UnusedProp(Property): '''A unused Property. These are typically used by Nagios but no longer useful/used by Alignak. This is just to warn the user that the option he uses is no more used in Alignak. ''' def __init__(self, text=None): '''Create a new Unused property Since this property is not used, there is no use for other parameters than 'text'. 'text' a some usage text if present, will print it to explain why it's no more useful :param text: :type text: None | str :return: None ''' pass
2
2
20
3
8
9
2
1.56
1
1
0
0
1
2
1
4
29
6
9
4
7
14
7
4
5
2
2
1
2
3,936
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/property.py
alignak.property.ToGuessProp
class ToGuessProp(Property): """Unknown property encountered while parsing""" def pythonize(self, val): """If value is a single list element just return the element does nothing otherwise :param val: value to convert :type val: :return: converted value :rtype: """ if isinstance(val, list) and len(set(val)) == 1: # If we have a list with a unique value just use it return val[0] # Well, can't choose to remove something. return val
class ToGuessProp(Property): '''Unknown property encountered while parsing''' def pythonize(self, val): '''If value is a single list element just return the element does nothing otherwise :param val: value to convert :type val: :return: converted value :rtype: ''' pass
2
2
15
2
4
9
2
2
1
2
0
0
1
0
1
4
18
3
5
2
3
10
5
2
3
2
2
1
2
3,937
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/property.py
alignak.property.StringProp
class StringProp(Property): """String property""" def pythonize(self, val): """Convert value into a string:: * If value is a list, try to take the last element :param val: value to convert :type val: :return: str corresponding to value :rtype: str """ return unique_value(val).strip()
class StringProp(Property): '''String property''' def pythonize(self, val): '''Convert value into a string:: * If value is a list, try to take the last element :param val: value to convert :type val: :return: str corresponding to value :rtype: str ''' pass
2
2
11
2
2
7
1
2.67
1
0
0
2
1
0
1
4
14
3
3
2
1
8
3
2
1
1
2
0
1
3,938
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/arbiterlink.py
alignak.objects.arbiterlink.ArbiterLinks
class ArbiterLinks(SatelliteLinks): """ Class to manage list of ArbiterLink. ArbiterLinks is used to regroup all links with Arbiter daemon """ inner_class = ArbiterLink
class ArbiterLinks(SatelliteLinks): ''' Class to manage list of ArbiterLink. ArbiterLinks is used to regroup all links with Arbiter daemon ''' pass
1
1
0
0
0
0
0
2
1
0
0
0
0
0
0
48
6
0
2
2
1
4
2
2
1
0
3
0
0
3,939
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_freshness.py
tests.test_freshness.TestPassiveChecks
class TestPassiveChecks(AlignakTest): """ This class test passive checks for host and services """ def setUp(self): super(TestPassiveChecks, self).setUp() self.setup_with_file('cfg/cfg_passive_checks.cfg', dispatching=True) self.clear_logs() assert self.conf_is_correct def test_start_freshness_on_alignak_start(self): """ When alignak starts, freshness period also starts instead are stale and so in end of freshness :return: None """ # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) # Test if not schedule a check on passive service/host when start alignak. # So the freshness start (item.last_state_update) will begin with time.time() of start # Alignak host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.event_handler_enabled = False self.scheduler_loop(1, [[host, 0, 'UP']]) time.sleep(0.1) self.assert_actions_count(0) self.assert_checks_count(2) self.assert_checks_match(0, 'hostname test_router_0', 'command') self.assert_checks_match(1, 'hostname test_host_0', 'command') def test_freshness_state(self): """ Test that freshness_state property is correctly defined in item (host or service) :return: None """ assert self._arbiter.conf.host_freshness_check_interval == 60 for h in self._scheduler.hosts: print(("Host %s: freshness check: %s (%d s), state: %s/%s, last state update: %s" % (h.get_name(), h.check_freshness, h.freshness_threshold, h.state_type, h.state, h.last_state_update))) # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) print("Global passive checks parameters:") print((" - accept_passive_host_checks: %s" % self._arbiter.conf.accept_passive_host_checks)) assert self._arbiter.conf.accept_passive_host_checks is True print((" - accept_passive_service_checks: %s" % self._arbiter.conf.accept_passive_service_checks)) assert self._arbiter.conf.accept_passive_service_checks is True host = 
self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.event_handler_enabled = False host_a = self._scheduler.hosts.find_by_name("test_host_A") host_b = self._scheduler.hosts.find_by_name("test_host_B") host_c = self._scheduler.hosts.find_by_name("test_host_C") host_d = self._scheduler.hosts.find_by_name("test_host_D") host_e = self._scheduler.hosts.find_by_name("test_host_E") host_f = self._scheduler.hosts.find_by_name("test_host_F") assert "d" == host_a.freshness_state assert 2400 == host_a.freshness_threshold # Even if u is set in the configuration file, get "x" assert "x" == host_b.freshness_state assert 1800 == host_b.freshness_threshold assert "o" == host_c.freshness_state assert 3600 == host_c.freshness_threshold # New "x" value defined for this host assert "x" == host_d.freshness_state assert 3600 == host_d.freshness_threshold # "x" as default value assert "x" == host_e.freshness_state assert 3600 == host_e.freshness_threshold # "x" as default value - 1200 as default freshness threshold (global conf parameter) assert "x" == host_f.freshness_state assert 60 == host_f.freshness_threshold svc0 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_0") svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_1") svc2 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_2") svc3 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_3") svc4 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_4") svc5 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_5") assert "o" == svc0.freshness_state assert "w" == svc1.freshness_state assert "c" == svc2.freshness_state assert "u" == svc3.freshness_state assert "x" == svc4.freshness_state assert "x" == svc5.freshness_state def test_freshness_expiration(self): """ When freshness period expires, set freshness state and output 
Test that on freshness period expiry, the item gets the freshness_state and its output is 'Freshness period expired' and that no check is scheduled to check the item (host / service) :return: None """ # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) host_a = self._scheduler.hosts.find_by_name("test_host_A") host_b = self._scheduler.hosts.find_by_name("test_host_B") host_c = self._scheduler.hosts.find_by_name("test_host_C") host_d = self._scheduler.hosts.find_by_name("test_host_D") host_e = self._scheduler.hosts.find_by_name("test_host_E") assert "d" == host_a.freshness_state assert "x" == host_b.freshness_state assert "o" == host_c.freshness_state assert "x" == host_d.freshness_state assert "x" == host_e.freshness_state svc0 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_0") svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_1") svc2 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_2") svc3 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_3") svc4 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_4") svc5 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_5") assert "o" == svc0.freshness_state assert "w" == svc1.freshness_state assert "c" == svc2.freshness_state assert "u" == svc3.freshness_state assert "x" == svc4.freshness_state assert "x" == svc5.freshness_state host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.event_handler_enabled = False # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime now = int(time.time()) # Set last state update in the past... 
host_a.last_state_update = now - 10000 host_b.last_state_update = now - 10000 host_c.last_state_update = now - 10000 host_d.last_state_update = now - 10000 host_e.last_state_update = now - 10000 # Set last state update in the past... svc0.last_state_update = now - 10000 svc1.last_state_update = now - 10000 svc2.last_state_update = now - 10000 svc3.last_state_update = now - 10000 svc4.last_state_update = now - 10000 svc5.last_state_update = now - 10000 # expiry_date = time.strftime("%Y-%m-%d %H:%M:%S %Z") expiry_date = datetime.datetime.utcfromtimestamp(now).strftime("%Y-%m-%d %H:%M:%S %Z") self.scheduler_loop(1, [[host, 0, 'UP']]) # Time warp 5 seconds frozen_datetime.tick(delta=datetime.timedelta(seconds=5)) assert "OK" == svc0.state assert "WARNING" == svc1.state assert "CRITICAL" == svc2.state assert "UNKNOWN" == svc3.state assert "UNREACHABLE" == svc4.state assert "UNREACHABLE" == svc5.state assert "DOWN" == host_a.state assert "UNREACHABLE" == host_b.state assert "UP" == host_c.state assert "UNREACHABLE" == host_d.state assert "UNREACHABLE" == host_e.state items = [svc0, svc1, svc2, svc3, svc4, host_a, host_b, host_c, host_d] for item in items: print("%s / %s" % (item.get_name(), item.output)) for item in items: # Some have already been checked twice ... 
if item.get_name() in ['test_host_C', 'test_svc_0']: assert "Freshness period expired: %s, last updated: %s" \ % (expiry_date, expiry_date) == item.output else: assert "Freshness period expired: %s" \ % (expiry_date) == item.output self.assert_actions_count(0) # No raised notifications self.assert_checks_count(2) # test_host_0 and test_router_0 # Order is not guaranteed # self.assert_checks_match(0, 'hostname test_router_0', 'command') # self.assert_checks_match(1, 'hostname test_host_0', 'command') def test_freshness_disabled(self): """ When freshness is disabled for hosts or service, no state change :return: None """ self._scheduler.pushed_conf.check_host_freshness = False self._scheduler.pushed_conf.check_service_freshness = False # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) host_a = self._scheduler.hosts.find_by_name("test_host_A") host_b = self._scheduler.hosts.find_by_name("test_host_B") host_c = self._scheduler.hosts.find_by_name("test_host_C") host_d = self._scheduler.hosts.find_by_name("test_host_D") host_e = self._scheduler.hosts.find_by_name("test_host_E") assert "d" == host_a.freshness_state assert "x" == host_b.freshness_state assert "o" == host_c.freshness_state assert "x" == host_d.freshness_state assert "x" == host_e.freshness_state # Set last state update in the past... 
host_a.last_state_update = int(time.time()) - 10000 host_b.last_state_update = int(time.time()) - 10000 host_c.last_state_update = int(time.time()) - 10000 host_d.last_state_update = int(time.time()) - 10000 host_e.last_state_update = int(time.time()) - 10000 svc0 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_0") svc1 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_1") svc2 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_2") svc3 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_3") svc4 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_4") svc5 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_A", "test_svc_5") assert "o" == svc0.freshness_state assert "w" == svc1.freshness_state assert "c" == svc2.freshness_state assert "u" == svc3.freshness_state assert "x" == svc4.freshness_state assert "x" == svc5.freshness_state # Set last state update in the past... 
svc0.last_state_update = int(time.time()) - 10000 svc1.last_state_update = int(time.time()) - 10000 svc2.last_state_update = int(time.time()) - 10000 svc3.last_state_update = int(time.time()) - 10000 svc4.last_state_update = int(time.time()) - 10000 svc5.last_state_update = int(time.time()) - 10000 host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.event_handler_enabled = False # Set the host UP - this will run the scheduler loop to check for freshness self.scheduler_loop(1, [[host, 0, 'UP']]) time.sleep(0.1) # Default state remains assert "OK" == svc0.state assert "OK" == svc1.state assert "OK" == svc2.state assert "OK" == svc3.state assert "OK" == svc4.state assert "OK" == svc5.state # Default state remains assert "UP" == host_a.state assert "UP" == host_b.state assert "UP" == host_c.state assert "UP" == host_d.state assert "UP" == host_e.state def test_freshness_default_threshold(self): """ Host/service get the global freshness threshold if they do not define one :return: None """ # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) host_f = self._scheduler.hosts.find_by_name("test_host_F") assert "x" == host_f.freshness_state # Not defined, so default value (0) that is replaced with the global host_freshness_check_interval assert 60 == host_f.freshness_threshold svc6 = self._scheduler.services.find_srv_by_name_and_hostname("test_host_F", "test_svc_6") assert "x" == svc6.freshness_state # Not defined, so default value - default is 0 for no freshness check! 
assert 60 == svc6.freshness_threshold def test_freshness_expiration_repeat_host(self): """ We test the running property freshness_expired to know if we are in expiration freshness or not - test for an host :return: None """ assert self._arbiter.conf.host_freshness_check_interval == 60 # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) for h in self._scheduler.hosts: print(("Host %s: freshness check: %s (%d s), state: %s/%s, last state update: %s" % (h.get_name(), h.check_freshness, h.freshness_threshold, h.state_type, h.state, h.last_state_update))) host_f = self._scheduler.hosts.find_by_name("test_host_F") print(("Host F: state: %s/%s, last state update: %s" % (host_f.state_type, host_f.state, host_f.last_state_update))) print(host_f) host_b = self._scheduler.hosts.find_by_name("test_host_B") print(("Host B: state: %s/%s, last state update: %s" % (host_b.state_type, host_b.state, host_b.last_state_update))) print(host_b) assert "x" == host_b.freshness_state assert 1800 == host_b.freshness_threshold # Check attempts assert 0 == host_b.attempt assert 5 == host_b.max_check_attempts # Force freshness threshold and latency host_b.freshness_threshold = 1 host_b.__class__.additional_freshness_latency = 1 host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.event_handler_enabled = False print(("Host: state: %s/%s, last state update: %s" % (host_b.state_type, host_b.state, host_b.last_state_update))) assert 0 == self.manage_freshness_check(1) print(("Host: state: %s/%s, last state update: %s" % (host_b.state_type, host_b.state, host_b.last_state_update))) # We are still ok... assert "UP" == host_b.state assert "HARD" == host_b.state_type assert False == host_b.freshness_expired # Wait for more than freshness threshold + latency... 
time.sleep(3) checks_count = self.manage_freshness_check(1) assert 1 == checks_count print(("Host: state: %s/%s, last state update: %s" % (host_b.state_type, host_b.state, host_b.last_state_update))) assert "UNREACHABLE" == host_b.state assert "SOFT" == host_b.state_type assert False == host_b.freshness_expired assert 1 == host_b.attempt time.sleep(1) assert 1 == self.manage_freshness_check(1) print(("Host: state: %s/%s, last state update: %s" % (host_b.state_type, host_b.state, host_b.last_state_update))) assert "UNREACHABLE" == host_b.state assert "SOFT" == host_b.state_type assert False == host_b.freshness_expired assert 2 == host_b.attempt time.sleep(1) assert 1 == self.manage_freshness_check(1) print(("Host: state: %s/%s, last state update: %s" % (host_b.state_type, host_b.state, host_b.last_state_update))) assert "UNREACHABLE" == host_b.state assert "SOFT" == host_b.state_type assert False == host_b.freshness_expired assert 3 == host_b.attempt time.sleep(1) assert 1 == self.manage_freshness_check(1) print(("Host: state: %s/%s, last state update: %s" % (host_b.state_type, host_b.state, host_b.last_state_update))) assert "UNREACHABLE" == host_b.state assert "SOFT" == host_b.state_type assert False == host_b.freshness_expired assert 4 == host_b.attempt time.sleep(1) assert 1 == self.manage_freshness_check(1) assert "UNREACHABLE" == host_b.state assert "HARD" == host_b.state_type assert True == host_b.is_max_attempts() assert True == host_b.freshness_expired assert 5 == host_b.attempt # Then no more change for this host ! 
time.sleep(1) assert 0 == self.manage_freshness_check(1) assert "UNREACHABLE" == host_b.state assert "HARD" == host_b.state_type assert True == host_b.is_max_attempts() assert True == host_b.freshness_expired assert 5 == host_b.attempt self.show_checks() time.sleep(1) assert 0 == self.manage_freshness_check(1) assert "UNREACHABLE" == host_b.state assert "HARD" == host_b.state_type assert True == host_b.is_max_attempts() assert True == host_b.freshness_expired assert 5 == host_b.attempt self.show_logs() # The freshness log is raised for each check attempt assert len(self.get_log_match("alignak.objects.schedulingitem] The freshness period of host 'test_host_B'")) == 5 # [1512800594] WARNING: [alignak.objects.schedulingitem] The freshness period of host 'test_host_B' is expired by 0d 0h 0m 1s (threshold=0d 0h 0m 1s + 1s). Attempt: 1 / 5. I'm forcing the state to freshness state (x / SOFT). # [1512800595] WARNING: [alignak.objects.schedulingitem] The freshness period of host 'test_host_B' is expired by 0d 0h 0m 2s (threshold=0d 0h 0m 1s + 1s). Attempt: 2 / 5. I'm forcing the state to freshness state (x / SOFT). # [1512800596] WARNING: [alignak.objects.schedulingitem] The freshness period of host 'test_host_B' is expired by 0d 0h 0m 3s (threshold=0d 0h 0m 1s + 1s). Attempt: 3 / 5. I'm forcing the state to freshness state (x / SOFT). # [1512800597] WARNING: [alignak.objects.schedulingitem] The freshness period of host 'test_host_B' is expired by 0d 0h 0m 4s (threshold=0d 0h 0m 1s + 1s). Attempt: 4 / 5. I'm forcing the state to freshness state (x / SOFT). # [1512800598] WARNING: [alignak.objects.schedulingitem] The freshness period of host 'test_host_B' is expired by 0d 0h 0m 5s (threshold=0d 0h 0m 1s + 1s). Attempt: 5 / 5. I'm forcing the state to freshness state (x / HARD). assert len(self.get_log_match("Attempt: 1 / 5. ")) == 1 assert len(self.get_log_match("Attempt: 2 / 5. ")) == 1 assert len(self.get_log_match("Attempt: 3 / 5. 
")) == 1 assert len(self.get_log_match("Attempt: 4 / 5. ")) == 1 assert len(self.get_log_match("Attempt: 5 / 5. ")) == 1 # Now receive check_result (passive), so we must be outside of freshness_expired excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_B;0;Host is UP' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host_b.state assert 'Host is UP' == host_b.output assert False == host_b.freshness_expired def test_freshness_expiration_repeat_host_2(self): """ We test the running property freshness_expired to know if we are in expiration freshness or not - test for an host (bis) :return: None """ assert self._arbiter.conf.host_freshness_check_interval == 60 # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) for h in self._scheduler.hosts: print(("Host %s: freshness check: %s (%d s), state: %s/%s, last state update: %s" % (h.get_name(), h.check_freshness, h.freshness_threshold, h.state_type, h.state, h.last_state_update))) host_f = self._scheduler.hosts.find_by_name("test_host_F") print(("Host F: state: %s/%s, last state update: %s" % (host_f.state_type, host_f.state, host_f.last_state_update))) assert "x" == host_f.freshness_state assert 60 == host_f.freshness_threshold # Check attempts assert 0 == host_f.attempt assert 3 == host_f.max_check_attempts # Force freshness threshold and latency host_f.freshness_threshold = 1 host_f.__class__.additional_freshness_latency = 1 assert 0 == self.manage_freshness_check(1) print(("Host: state: %s/%s, last state update: %s" % (host_f.state_type, host_f.state, host_f.last_state_update))) # We are still ok... assert "UP" == host_f.state assert "HARD" == host_f.state_type assert False == host_f.freshness_expired # Wait for more than freshness threshold + latency... 
time.sleep(3) checks_count = self.manage_freshness_check(1) assert 1 == checks_count print(("Host: state: %s/%s, last state update: %s" % (host_f.state_type, host_f.state, host_f.last_state_update))) assert "UNREACHABLE" == host_f.state assert "SOFT" == host_f.state_type assert False == host_f.freshness_expired assert 1 == host_f.attempt time.sleep(1) assert 1 == self.manage_freshness_check(1) print(("Host: state: %s/%s, last state update: %s" % (host_f.state_type, host_f.state, host_f.last_state_update))) assert "UNREACHABLE" == host_f.state assert "SOFT" == host_f.state_type assert False == host_f.freshness_expired assert 2 == host_f.attempt time.sleep(1) assert 1 == self.manage_freshness_check(1) print(("Host: state: %s/%s, last state update: %s" % (host_f.state_type, host_f.state, host_f.last_state_update))) assert "UNREACHABLE" == host_f.state assert "HARD" == host_f.state_type assert True == host_f.freshness_expired assert 3 == host_f.attempt # Then no more change for this host ! time.sleep(1) assert 0 == self.manage_freshness_check(1) assert "UNREACHABLE" == host_f.state assert "HARD" == host_f.state_type assert True == host_f.is_max_attempts() assert True == host_f.freshness_expired assert 3 == host_f.attempt self.show_checks() time.sleep(1) assert 0 == self.manage_freshness_check(1) assert "UNREACHABLE" == host_f.state assert "HARD" == host_f.state_type assert True == host_f.is_max_attempts() assert True == host_f.freshness_expired assert 3 == host_f.attempt self.show_logs() # The freshness log is raised for each check attempt assert len(self.get_log_match("alignak.objects.schedulingitem] The freshness period of host 'test_host_F'")) == 3 assert len(self.get_log_match("Attempt: 1 / 3. ")) == 1 assert len(self.get_log_match("Attempt: 2 / 3. ")) == 1 assert len(self.get_log_match("Attempt: 3 / 3. 
")) == 1 # Now receive check_result (passive), so we must be outside of freshness_expired excmd = '[%d] PROCESS_HOST_CHECK_RESULT;test_host_F;0;Host is UP' % time.time() self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'UP' == host_f.state assert 'Host is UP' == host_f.output assert False == host_f.freshness_expired def test_freshness_expiration_repeat_service(self): """ We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service - retry_interval is 1 - max_check_attempts is 3 This test runs with the services declare on test_host_F in the configuration :return: None """ self._freshness_expiration_repeat_service('test_svc_6') def test_freshness_expiration_repeat_service_2(self): """ We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service retry_interval is 0 max_check_attempts is 2 :return: None """ self._freshness_expiration_repeat_service('test_svc_7', count=2) def test_freshness_expiration_repeat_service_3(self): """ We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service retry_interval is 0 max_check_attempts is 1 :return: None """ self._freshness_expiration_repeat_service('test_svc_8', count=1) def _freshness_expiration_repeat_service(self, svc_description, count=3): """ We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service :return: None """ assert self._arbiter.conf.service_freshness_check_interval == 60 assert self._arbiter.conf.host_freshness_check_interval == 60 # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_check_freshness': 1}) for h in self._scheduler.hosts: print(("Host %s: freshness check: %s (%d s), state: %s/%s, last state update: %s" % (h.get_name(), h.check_freshness, h.freshness_threshold, h.state_type, h.state, 
h.last_state_update))) host_f = self._scheduler.hosts.find_by_name("test_host_F") svc_f = None print(("Host F: state: %s/%s, last state update: %s" % (host_f.state_type, host_f.state, host_f.last_state_update))) for s in host_f.services: s = self._scheduler.services[s] if s.get_name() == svc_description: print(("Service %s: freshness check: %s (%d s), state: %s/%s, last state update: %s" % (s.get_name(), s.check_freshness, s.freshness_threshold, s.state_type, s.state, s.last_state_update))) svc_f = s break assert svc_f is not None assert "x" == svc_f.freshness_state assert 60 == svc_f.freshness_threshold # Check attempts assert 0 == svc_f.attempt assert count == svc_f.max_check_attempts # Force freshness threshold and latency svc_f.freshness_threshold = 1 svc_f.__class__.additional_freshness_latency = 1 # Same as the scheduler list ;) services = [s for s in self._scheduler.services if not self._scheduler.hosts[s.host].freshness_expired and s.check_freshness and not s.freshness_expired and s.passive_checks_enabled and not s.active_checks_enabled] print(("Freshness expired services: %d" % len(services))) # Some potential services to check for freshness services_count = len(services) assert 0 == self.manage_freshness_check(1) print(("Service %s: state: %s/%s, last state update: %s, attempt: %d / %d" % (svc_description, svc_f.state_type, svc_f.state, svc_f.last_state_update, svc_f.attempt, svc_f.max_check_attempts))) # We are still ok... assert "OK" == svc_f.state assert "HARD" == svc_f.state_type assert False == svc_f.freshness_expired # Wait for more than freshness threshold + latency... 
time.sleep(3) for idx in range(1, count): assert 1 == self.manage_freshness_check() print(("Attempt %d: state: %s/%s, last state update: %s, attempt: %d / %d" % (idx, svc_f.state_type, svc_f.state, svc_f.last_state_update, svc_f.attempt, svc_f.max_check_attempts))) assert "UNREACHABLE" == svc_f.state assert "SOFT" == svc_f.state_type assert False == svc_f.freshness_expired assert svc_f.attempt == idx time.sleep(1) self.show_logs() # Last check loop must raise a freshness expired and max attempts is reached ! assert 1 == self.manage_freshness_check() print(("Last attempt: state: %s/%s, last state update: %s, attempt: %d / %d" % (svc_f.state_type, svc_f.state, svc_f.last_state_update, svc_f.attempt, svc_f.max_check_attempts))) assert "UNREACHABLE" == svc_f.state assert "HARD" == svc_f.state_type assert True == svc_f.is_max_attempts() assert True == svc_f.freshness_expired assert svc_f.attempt == count # assert 1 == self.manage_freshness_check(1) # print("Service: state: %s/%s, last state update: %s" % (svc_f.state_type, svc_f.state, svc_f.last_state_update)) # assert "UNREACHABLE" == svc_f.state # assert "SOFT" == svc_f.state_type # assert False == svc_f.freshness_expired # assert 2 == svc_f.attempt # time.sleep(1) # # assert 1 == self.manage_freshness_check(1) # print("Service: state: %s/%s, last state update: %s" % (svc_f.state_type, svc_f.state, svc_f.last_state_update)) # assert "UNREACHABLE" == svc_f.state # assert "HARD" == svc_f.state_type # assert True == svc_f.freshness_expired # assert 3 == svc_f.attempt # Same as the scheduler list ;) services = [s for s in self._scheduler.services if not self._scheduler.hosts[s.host].freshness_expired and s.check_freshness and not s.freshness_expired and s.passive_checks_enabled and not s.active_checks_enabled] print(("Freshness expired services: %d" % len(services))) # One less service to check now ! assert len(services) == services_count - 1 # Then no more change for this service ... even if 5 more loops are run! 
for idx in range(1, 5): assert 0 == self.manage_freshness_check(1) assert "UNREACHABLE" == svc_f.state assert "HARD" == svc_f.state_type assert True == svc_f.is_max_attempts() assert True == svc_f.freshness_expired assert svc_f.attempt == count time.sleep(0.5) self.show_checks() self.show_logs() # The freshness log is raised for each check attempt assert len(self.get_log_match( "alignak.objects.schedulingitem] " "The freshness period of service 'test_host_F/%s'" % svc_description)) == count for idx in range(1, count+1): assert len(self.get_log_match("Attempt: %d / %d. " % (idx, svc_f.max_check_attempts))) == 1 assert len(self.get_log_match("x / SOFT")) == count - 1 assert len(self.get_log_match("x / HARD")) == 1 # Now receive check_result (passive), so we must be outside of freshness_expired excmd = "[%d] PROCESS_SERVICE_CHECK_RESULT;test_host_F;%s;0;Service is OK" \ % (time.time(), svc_description) self._scheduler.run_external_commands([excmd]) self.external_command_loop() assert 'OK' == svc_f.state assert "HARD" == svc_f.state_type assert 'Service is OK' == svc_f.output if count > 1: assert False == svc_f.is_max_attempts() else: assert True == svc_f.is_max_attempts() assert False == svc_f.freshness_expired assert svc_f.attempt == 1
class TestPassiveChecks(AlignakTest): ''' This class test passive checks for host and services ''' def setUp(self): pass def test_start_freshness_on_alignak_start(self): ''' When alignak starts, freshness period also starts instead are stale and so in end of freshness :return: None ''' pass def test_freshness_state(self): ''' Test that freshness_state property is correctly defined in item (host or service) :return: None ''' pass def test_freshness_expiration(self): ''' When freshness period expires, set freshness state and output Test that on freshness period expiry, the item gets the freshness_state and its output is 'Freshness period expired' and that no check is scheduled to check the item (host / service) :return: None ''' pass def test_freshness_disabled(self): ''' When freshness is disabled for hosts or service, no state change :return: None ''' pass def test_freshness_default_threshold(self): ''' Host/service get the global freshness threshold if they do not define one :return: None ''' pass def test_freshness_expiration_repeat_host(self): ''' We test the running property freshness_expired to know if we are in expiration freshness or not - test for an host :return: None ''' pass def test_freshness_expiration_repeat_host_2(self): ''' We test the running property freshness_expired to know if we are in expiration freshness or not - test for an host (bis) :return: None ''' pass def test_freshness_expiration_repeat_service(self): ''' We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service - retry_interval is 1 - max_check_attempts is 3 This test runs with the services declare on test_host_F in the configuration :return: None ''' pass def test_freshness_expiration_repeat_service_2(self): ''' We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service retry_interval is 0 max_check_attempts is 2 :return: None ''' pass def 
test_freshness_expiration_repeat_service_3(self): ''' We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service retry_interval is 0 max_check_attempts is 1 :return: None ''' pass def _freshness_expiration_repeat_service(self, svc_description, count=3): ''' We test the running property freshness_expired to know if we are in expiration freshness or not - test for a service :return: None ''' pass
13
12
57
9
38
10
2
0.29
1
5
0
0
12
0
12
67
697
114
454
78
441
131
424
77
411
8
2
3
25
3,940
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_flapping.py
tests.test_flapping.TestFlapping
class TestFlapping(AlignakTest): """ This class tests the flapping management """ def setUp(self): super(TestFlapping, self).setUp() self.setup_with_file('cfg/cfg_flapping.cfg', dispatching=True) assert self.conf_is_correct def test_flapping(self): """Test host/service flapping detection :return: """ # Get the hosts and services" host = self._scheduler.hosts.find_by_name("test_host_0") host.act_depend_of = [] assert host.flap_detection_enabled router = self._scheduler.hosts.find_by_name("test_router_0") router.act_depend_of = [] assert router.flap_detection_enabled svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.event_handler_enabled = False svc.act_depend_of = [] # Force because the default configuration disables the flapping detection svc.flap_detection_enabled = True self.scheduler_loop(2, [ [host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'OK'] ]) assert 'UP' == host.state assert 'HARD' == host.state_type assert 'UP' == router.state assert 'HARD' == router.state_type assert 'OK' == svc.state assert 'HARD' == svc.state_type assert 25 == svc.low_flap_threshold # Set the service as a problem self.scheduler_loop(3, [ [svc, 2, 'Crit'] ]) assert 'CRITICAL' == svc.state assert 'HARD' == svc.state_type # Ok, now go in flap! for i in range(1, 10): self.scheduler_loop(1, [[svc, 0, 'Ok']]) self.scheduler_loop(1, [[svc, 2, 'Crit']]) # Should be in flapping state now assert svc.is_flapping # We got 'monitoring_log' broks for logging to the monitoring logs... 
monitoring_logs = [] for brok in sorted(iter(self._main_broker.broks.values()), key=lambda x: x.creation_time): if brok.type == 'monitoring_log': data = unserialize(brok.data) monitoring_logs.append((data['level'], data['message'])) expected_logs = [ ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;Crit'), ('error', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;CRITICAL;' 'notify-service;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;OK;' 'notify-service;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE FLAPPING ALERT: test_host_0;test_ok_0;STARTED; ' 'Service appears to have started flapping (83.8% change >= 50.0% threshold)'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'FLAPPINGSTART (OK);notify-service;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), 
('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ] for log_level, log_message in expected_logs: assert (log_level, log_message) in monitoring_logs # Now we put it as back :) # 10 is not enouth to get back as normal for i in range(1, 11): self.scheduler_loop(1, [[svc, 0, 'Ok']]) assert svc.is_flapping # 10 others can be good (near 4.1 %) for i in range(1, 11): self.scheduler_loop(1, [[svc, 0, 'Ok']]) assert not svc.is_flapping # We got 'monitoring_log' broks for logging to the monitoring logs... monitoring_logs = [] for brok in sorted(iter(self._main_broker.broks.values()), key=lambda x: x.creation_time): if brok.type == 'monitoring_log': data = unserialize(brok.data) monitoring_logs.append((data['level'], data['message'])) print(("Logs: %s" % monitoring_logs)) expected_logs = [ ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;Crit'), ('error', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;CRITICAL;' 'notify-service;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;OK;' 'notify-service;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: 
test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE FLAPPING ALERT: test_host_0;test_ok_0;STARTED; ' 'Service appears to have started flapping ' '(83.8% change >= 50.0% threshold)'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'FLAPPINGSTART (OK);notify-service;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE FLAPPING ALERT: test_host_0;test_ok_0;STOPPED; ' 'Service appears to have stopped flapping ' '(21.5% change < 25.0% threshold)'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'FLAPPINGSTOP (OK);notify-service;Ok') ] for log_level, log_message in expected_logs: assert (log_level, log_message) in monitoring_logs def test_flapping(self): """ :return: """ # Get the hosts and services" host = self._scheduler.hosts.find_by_name("test_host_0") host.act_depend_of = [] assert host.flap_detection_enabled router = self._scheduler.hosts.find_by_name("test_router_0") router.act_depend_of = [] assert router.flap_detection_enabled svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.event_handler_enabled = False svc.act_depend_of = [] # Force because the default configuration disables the flapping detection svc.flap_detection_enabled = True self.scheduler_loop(2, [ [host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'OK'] ]) assert 'UP' == host.state assert 'HARD' == host.state_type assert 'UP' == router.state assert 'HARD' == router.state_type assert 'OK' == svc.state assert 'HARD' == svc.state_type assert 25 == svc.low_flap_threshold # Set the service as a problem self.scheduler_loop(3, [ [svc, 2, 'Crit'] ]) assert 'CRITICAL' == svc.state assert 
'HARD' == svc.state_type # Ok, now go in flap! for i in range(1, 10): self.scheduler_loop(1, [[svc, 0, 'Ok']]) self.scheduler_loop(1, [[svc, 2, 'Crit']]) # Should be in flapping state now assert svc.is_flapping expected_logs = [ ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;0;OK'), ('info', 'ACTIVE HOST CHECK: test_router_0;UP;0;UP | rtt=10'), ('info', 'ACTIVE HOST CHECK: test_host_0;UP;0;UP | value1=1 value2=2'), ('info', 'ACTIVE HOST CHECK: test_host_0;UP;1;UP | value1=1 value2=2'), ('info', 'ACTIVE HOST CHECK: test_router_0;UP;1;UP | rtt=10'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;OK'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;Crit'), ('error', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'CRITICAL;1;notify-service;Crit'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;2;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;2;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'OK;0;notify-service;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: 
test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE FLAPPING ALERT: test_host_0;test_ok_0;STARTED; ' 'Service appears to have started flapping (83.8% change >= 50.0% threshold)'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;FLAPPINGSTART ' '(OK);0;notify-service;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ] self.check_monitoring_events_log(expected_logs, 
dump=True) # Now we put it as back :) # 10 is not enouth to get back as normal for i in range(1, 11): self.scheduler_loop(1, [[svc, 0, 'Ok']]) assert svc.is_flapping # 10 others can be good (near 4.1 %) for i in range(1, 11): self.scheduler_loop(1, [[svc, 0, 'Ok']]) assert not svc.is_flapping # We got 'monitoring_log' broks for logging to the monitoring logs... expected_logs = [ ('info', 'ACTIVE HOST CHECK: test_host_0;UP;0;UP | value1=1 value2=2'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;0;OK'), ('info', 'ACTIVE HOST CHECK: test_router_0;UP;0;UP | rtt=10'), ('info', 'ACTIVE HOST CHECK: test_router_0;UP;1;UP | rtt=10'), ('info', 'ACTIVE HOST CHECK: test_host_0;UP;1;UP | value1=1 value2=2'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;OK'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;Crit'), ('error', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'CRITICAL;1;notify-service;Crit'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;2;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;2;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'OK;0;notify-service;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: 
test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'SERVICE FLAPPING ALERT: test_host_0;test_ok_0;STARTED; ' 'Service appears to have started flapping (83.8% change >= 50.0% threshold)'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;FLAPPINGSTART ' '(OK);0;notify-service;Ok'), ('error', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('error', 'ACTIVE SERVICE CHECK: 
test_host_0;test_ok_0;CRITICAL;1;Crit'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;Crit'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;SOFT;2;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE FLAPPING ALERT: test_host_0;test_ok_0;STOPPED; ' 'Service appears to have stopped flapping (21.5% change < 25.0% threshold)'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;FLAPPINGSTOP ' '(OK);0;notify-service;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ('info', 'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;Ok'), ] self.check_monitoring_events_log(expected_logs, dump=True)
class TestFlapping(AlignakTest): ''' This class tests the flapping management ''' def setUp(self): pass def test_flapping(self): '''Test host/service flapping detection :return: ''' pass def test_flapping(self): ''' :return: ''' pass
4
3
117
8
101
8
5
0.09
1
2
0
0
3
0
3
58
357
25
304
18
300
28
90
18
86
10
2
2
15
3,941
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_extended_info.py
tests.test_extended_info.TestHostExtended
class TestHostExtended(AlignakTest): def setUp(self): super(TestHostExtended, self).setUp() self.setup_with_file('cfg/extended/extended_info.cfg', dispatching=True) assert self.conf_is_correct self._sched = self._scheduler def test_extended_host_information(self): """ Host extended information """ # Get hosts and services host = self._sched.hosts.find_by_name("host_A") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router self.scheduler_loop(2, [ [host, 0, 'UP | value1=1 value2=2'] ]) assert 'UP' == host.state assert 'HARD' == host.state_type assert 'host.png' == host.icon_image assert 'Alt for icon.png' == host.icon_image_alt assert 'Notes' == host.notes # This parameter is already defined in the host, thus it is not overloaded by the one # in the hostextinfo definition assert '/alignak/wiki/doku.php/$HOSTNAME$' == host.notes_url assert 'vrml.png' == host.vrml_image assert 'map.png' == host.statusmap_image # Not implemented, see #574 # self.assertEqual('1', host['2d_coords']) # self.assertEqual('2', host['3d_coords']) def test_extended_service_information(self): """ Service extended information """ # Get hosts and services host = self._sched.hosts.find_by_name("host_A") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._sched.services.find_srv_by_name_and_hostname("host_A", "svc_A") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults self.scheduler_loop(2, [ [svc, 0, 'OK'] ]) assert 'OK' == svc.state assert 'HARD' == svc.state_type assert 'service.png' == svc.icon_image assert 'Alt for service.png' == svc.icon_image_alt assert 'Notes for a service' == svc.notes assert 'http://Notes_url/service' == svc.notes_url
class TestHostExtended(AlignakTest): def setUp(self): pass def test_extended_host_information(self): ''' Host extended information ''' pass def test_extended_service_information(self): ''' Service extended information ''' pass
4
2
16
2
12
3
1
0.32
1
1
0
0
3
1
3
58
56
9
38
8
34
12
33
8
29
1
2
0
3
3,942
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/borg.py
alignak.borg.Borg
class Borg(object): # pylint: disable=R0903 """Borg class define a simple __shared_state class attribute. __dict__ points to this value when calling __init__ This is used to make a Singleton-like pattern with a python object that inherits from the Borg. The Singleton design pattern (DP) has a catchy name, but the wrong focus -- on identity rather than on state. The Borg design pattern has all instances share state instead, and Python makes it, literally, a snap. """ __shared_state = {} def __init__(self): self.__dict__ = self.__class__.__shared_state
class Borg(object): '''Borg class define a simple __shared_state class attribute. __dict__ points to this value when calling __init__ This is used to make a Singleton-like pattern with a python object that inherits from the Borg. The Singleton design pattern (DP) has a catchy name, but the wrong focus -- on identity rather than on state. The Borg design pattern has all instances share state instead, and Python makes it, literally, a snap. ''' def __init__(self): pass
2
1
2
0
2
0
1
2
1
0
0
1
1
1
1
1
14
3
4
4
2
8
4
4
2
1
1
0
1
3,943
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/bin/alignak_environment.py
alignak.bin.alignak_environment.AlignakConfigParser
class AlignakConfigParser(object): """ Class to parse the Alignak main configuration file """ def __init__(self, args=None): """ Setup the configuration parser When used without args, it is considered as called by the Alignak daemon creation and the command line parser is not invoked. If called without args, it is considered as called from the command line and all the configuration file variables are output to the console with an 'export VARIABLE=value' format to be sourced to declare shell environment variables. :param args: """ # Alignak version as a property self.alignak_version = __version__ self.export = False self.embedded = True if args is None: # Get command line parameters try: args = docopt(__doc__) except DocoptExit as exp: print("Command line parsing error:\n%s." % (exp)) exit(64) # Used as an independent script self.embedded = False # Print export commands for the calling shell self.export = True # Verbose self.verbose = False if '--verbose' in args and args['--verbose']: print("Alignak environment parser:") print("- verbose mode is On") self.verbose = True # Get the targeted item self.configuration_file = args['<cfg_file>'] if self.verbose: print("- configuration file name: %s" % self.configuration_file) if self.configuration_file is None: print("* missing configuration file name. Please provide a configuration " "file name in the command line parameters") if self.embedded: raise ValueError exit(64) self.configuration_file = os.path.abspath(self.configuration_file) if not os.path.exists(self.configuration_file): print("* required configuration file does not exist: %s" % self.configuration_file) if self.embedded: raise ValueError exit(1) def parse(self): # pylint: disable=too-many-branches """ Check if some extra configuration files are existing in an `alignak.d` sub directory near the found configuration file. Parse the Alignak configuration file(s) Exit the script if some errors are encountered. 
:return: True/False """ # Search if some ini files existe in an alignak.d sub-directory sub_directory = 'alignak.d' dir_name = os.path.dirname(self.configuration_file) dir_name = os.path.join(dir_name, sub_directory) self.cfg_files = [self.configuration_file] if os.path.exists(dir_name): for root, _, walk_files in os.walk(dir_name, followlinks=True): for found_file in walk_files: if not re.search(r"\.ini$", found_file): continue self.cfg_files.append(os.path.join(root, found_file)) print("Loading configuration files: %s " % self.cfg_files) # Read and parse the found configuration files self.config = configparser.ConfigParser() try: self.config.read(self.cfg_files) if self.config._sections == {}: print("* bad formatted configuration file: %s " % self.configuration_file) if self.embedded: raise ValueError sys.exit(2) for section in self.config.sections(): if self.verbose: print("- section: %s" % section) for (key, value) in self.config.items(section): inner_property = "%s.%s" % (section, key) # Set object property setattr(self, inner_property, value) # Set environment variable try: # Python 3 os.environ[inner_property] = value except UnicodeEncodeError: # Python 2 os.environ[inner_property] = value.encode('utf8') if self.verbose: print(" %s = %s" % (inner_property, value)) if self.export: # Allowed shell variables may only contain: [a-zA-z0-9_] inner_property = re.sub('[^0-9a-zA-Z]+', '_', inner_property) inner_property = inner_property.upper() print("export %s=%s" % (inner_property, cmd_quote(value))) except configparser.ParsingError as exp: print("* parsing error in config file : %s\n%s" % (self.configuration_file, exp.message)) if self.embedded: return False sys.exit(3) except configparser.InterpolationMissingOptionError as exp: print("* incorrect or missing variable: %s" % str(exp)) if self.embedded: return False sys.exit(3) if self.verbose: print("Configuration file parsed correctly") return True def write(self, env_file): """ Write the Alignak configuration to 
a file :param env_file: file name to dump the configuration :type env_file: str :return: True/False """ try: with open(env_file, "w") as out_file: self.config.write(out_file) except Exception as exp: # pylint: disable=broad-except print("Dumping environment file raised an error: %s. " % exp) def _search_sections(self, searched_sections=''): """ Search sections in the configuration which name starts with the provided search criteria :param searched_sections: :return: a dict containing the found sections and their parameters """ found_sections = {} # Get the daemons related properties for section in self.config.sections(): if not section.startswith(searched_sections): continue if section not in found_sections: found_sections.update({section: {'imported_from': self.configuration_file}}) for (key, value) in self.config.items(section): found_sections[section].update({key: value}) return found_sections def get_defaults(self): """ Get all the parameters defined in the DEFAULT ini file section... :return: a dict containing the default parameters """ return self.config.defaults() def get_legacy_cfg_files(self): """ Get the Alignak monitored configuration files. :return: a dict containing the Alignak legacy configuration files """ return self.get_alignak_configuration(legacy_cfg=True) def get_alignak_macros(self): """ Get the Alignak macros. :return: a dict containing the Alignak macros """ macros = self.get_alignak_configuration(macros=True) sections = self._search_sections('pack.') for name, _ in list(sections.items()): section_macros = self.get_alignak_configuration(section=name, macros=True) macros.update(section_macros) return macros def get_alignak_configuration(self, section=SECTION_CONFIGURATION, legacy_cfg=False, macros=False): """ Get the Alignak configuration parameters. All the variables included in the SECTION_CONFIGURATION section except the variables starting with 'cfg' and the macros. 
If `lecagy_cfg` is True, this function only returns the variables included in the SECTION_CONFIGURATION section except the variables starting with 'cfg' If `macros` is True, this function only returns the variables included in the SECTION_CONFIGURATION section that are considered as macros :param section: name of the sectio nto search for :type section: str :param legacy_cfg: only get the legacy cfg declarations :type legacy_cfg: bool :param macros: only get the macros declarations :type macros: bool :return: a dict containing the Alignak configuration parameters """ configuration = self._search_sections(section) if section not in configuration: return [] for prop, _ in list(configuration[section].items()): # Only legacy configuration items if legacy_cfg: if not prop.startswith('cfg'): configuration[section].pop(prop) continue # Only macro definitions if macros: if not prop.startswith('_') and not prop.startswith('$'): configuration[section].pop(prop) continue # All values except legacy configuration and macros if prop.startswith('cfg') or prop.startswith('_') or prop.startswith('$'): configuration[section].pop(prop) return configuration[section] def get_daemons(self, daemon_name=None, daemon_type=None): """ Get the daemons configuration parameters If name is provided, get the configuration for this daemon, else, If type is provided, get the configuration for all the daemons of this type, else get the configuration of all the daemons. :param daemon_name: the searched daemon name :param daemon_type: the searched daemon type :return: a dict containing the daemon(s) configuration parameters """ if daemon_name is not None: sections = self._search_sections('daemon.%s' % daemon_name) if 'daemon.%s' % daemon_name in sections: return sections['daemon.' 
+ daemon_name] return {} if daemon_type is not None: sections = self._search_sections('daemon.') for name, daemon in list(sections.items()): if 'type' not in daemon or not daemon['type'] == daemon_type: sections.pop(name) return sections return self._search_sections('daemon.') def get_modules(self, name=None, daemon_name=None, names_only=True): """ Get the modules configuration parameters If name is provided, get the configuration for this module, else, If daemon_name is provided, get the configuration for all the modules of this daemon, else get the configuration of all the modules. :param name: the searched module name :param daemon_name: the modules of this daemon :param names_only: if True only returns the modules names, else all the configuration data :return: a dict containing the module(s) configuration parameters """ if name is not None: sections = self._search_sections('module.' + name) if 'module.' + name in sections: return sections['module.' + name] return {} if daemon_name is not None: section = self.get_daemons(daemon_name) if 'modules' in section and section['modules']: modules = [] for module_name in section['modules'].split(','): if names_only: modules.append(module_name) else: modules.append(self.get_modules(name=module_name)) return modules return [] return self._search_sections('module.')
class AlignakConfigParser(object): ''' Class to parse the Alignak main configuration file ''' def __init__(self, args=None): ''' Setup the configuration parser When used without args, it is considered as called by the Alignak daemon creation and the command line parser is not invoked. If called without args, it is considered as called from the command line and all the configuration file variables are output to the console with an 'export VARIABLE=value' format to be sourced to declare shell environment variables. :param args: ''' pass def parse(self): ''' Check if some extra configuration files are existing in an `alignak.d` sub directory near the found configuration file. Parse the Alignak configuration file(s) Exit the script if some errors are encountered. :return: True/False ''' pass def write(self, env_file): ''' Write the Alignak configuration to a file :param env_file: file name to dump the configuration :type env_file: str :return: True/False ''' pass def _search_sections(self, searched_sections=''): ''' Search sections in the configuration which name starts with the provided search criteria :param searched_sections: :return: a dict containing the found sections and their parameters ''' pass def get_defaults(self): ''' Get all the parameters defined in the DEFAULT ini file section... :return: a dict containing the default parameters ''' pass def get_legacy_cfg_files(self): ''' Get the Alignak monitored configuration files. :return: a dict containing the Alignak legacy configuration files ''' pass def get_alignak_macros(self): ''' Get the Alignak macros. :return: a dict containing the Alignak macros ''' pass def get_alignak_configuration(self, section=SECTION_CONFIGURATION, legacy_cfg=False, macros=False): ''' Get the Alignak configuration parameters. All the variables included in the SECTION_CONFIGURATION section except the variables starting with 'cfg' and the macros. 
If `lecagy_cfg` is True, this function only returns the variables included in the SECTION_CONFIGURATION section except the variables starting with 'cfg' If `macros` is True, this function only returns the variables included in the SECTION_CONFIGURATION section that are considered as macros :param section: name of the sectio nto search for :type section: str :param legacy_cfg: only get the legacy cfg declarations :type legacy_cfg: bool :param macros: only get the macros declarations :type macros: bool :return: a dict containing the Alignak configuration parameters ''' pass def get_daemons(self, daemon_name=None, daemon_type=None): ''' Get the daemons configuration parameters If name is provided, get the configuration for this daemon, else, If type is provided, get the configuration for all the daemons of this type, else get the configuration of all the daemons. :param daemon_name: the searched daemon name :param daemon_type: the searched daemon type :return: a dict containing the daemon(s) configuration parameters ''' pass def get_modules(self, name=None, daemon_name=None, names_only=True): ''' Get the modules configuration parameters If name is provided, get the configuration for this module, else, If daemon_name is provided, get the configuration for all the modules of this daemon, else get the configuration of all the modules. :param name: the searched module name :param daemon_name: the modules of this daemon :param names_only: if True only returns the modules names, else all the configuration data :return: a dict containing the module(s) configuration parameters ''' pass
11
11
29
4
16
9
6
0.61
1
8
0
0
10
7
10
10
300
47
158
45
146
96
154
40
143
18
1
4
59
3,944
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/autoslots.py
alignak.autoslots.AutoSlots
class AutoSlots(type): """AutoSlots inherit from type, it's compulsory for metaclass statement """ def __new__(mcs, name, bases, dct): """Called when we create a new Class Some properties names are not allowed in __slots__ like 2d_coords of Host, so we must tag them in properties with no_slots :param mcs: AutoSlots :type mcs: object :param name: string of the Class (like Service) :type name: str :param bases: Classes of which Class inherits (like SchedulingItem) :type bases: object :param dct: new Class dict (like all method of Service) :type dct: object :return: new object :rtype: object """ # Thanks to Bertrand Mathieu to the set idea slots = dct.get('__slots__', set()) # Now get properties from properties and running_properties if 'properties' in dct: props = dct['properties'] slots.update((p for p in props if not props[p].no_slots)) if 'running_properties' in dct: props = dct['running_properties'] slots.update((p for p in props if not props[p].no_slots)) dct['__slots__'] = tuple(slots) return type.__new__(mcs, name, bases, dct)
class AutoSlots(type): '''AutoSlots inherit from type, it's compulsory for metaclass statement ''' def __new__(mcs, name, bases, dct): '''Called when we create a new Class Some properties names are not allowed in __slots__ like 2d_coords of Host, so we must tag them in properties with no_slots :param mcs: AutoSlots :type mcs: object :param name: string of the Class (like Service) :type name: str :param bases: Classes of which Class inherits (like SchedulingItem) :type bases: object :param dct: new Class dict (like all method of Service) :type dct: object :return: new object :rtype: object ''' pass
2
2
27
1
10
16
3
1.64
1
2
0
0
1
0
1
14
32
3
11
4
9
18
11
4
9
3
2
1
3
3,945
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/alignakobject.py
alignak.alignakobject.AlignakObject
class AlignakObject(object): """This class provides a generic way to instantiate alignak objects. Attributes are serialized dynamically, whether we un-serialize them create them at run / parsing time """ properties = {} macros = {} def __init__(self, params=None, parsing=True): # pylint: disable=unused-argument """ If parsing is True, then the objects are created from an initial configuration read by the Alignak arbiter else the objects are restored from a previously serialized instance sent by the arbiter to another daemon. This function checks the object uuid in the following manner: - in parsing mode, this function simply creates an object uuid - in non parsing mode, this function restore the object attributes from the provided params :param params: initialization parameters :type params: dict :param parsing: configuration parsing phase :type parsing: bool """ if parsing: # Do not manage anything in the properties, it is the job of the Item __init__ function if not hasattr(self, 'uuid'): self.uuid = get_a_new_object_id() return # Fill the default if we are not parsing a configuration. # This will define some probable missing properties self.fill_default() if params is None: # Object is created without any parameters return if 'uuid' not in params: self.uuid = get_a_new_object_id() all_props = {} all_props.update(getattr(self, "properties", {})) all_props.update(getattr(self, "running_properties", {})) for key, value in params.items(): setattr(self, key, value) # pylint: disable=unused-argument def serialize(self, no_json=True, printing=False): """This function serializes into a simple dictionary object. It is used when transferring data to other daemons over the network (http) Here is the generic function that simply export attributes declared in the properties dictionary of the object. 
:return: Dictionary containing key and value from properties :rtype: dict """ # uuid is not in *_properties res = { 'uuid': self.uuid } for prop in self.__class__.properties: if not hasattr(self, prop): continue res[prop] = getattr(self, prop) return res def fill_default(self): """ Define the object properties with a default value when the property is not yet defined :return: None """ for prop, entry in self.__class__.properties.items(): if hasattr(self, prop): continue if not hasattr(entry, 'default'): continue if not entry.has_default: continue if hasattr(entry.default, '__iter__'): setattr(self, prop, copy(entry.default)) else: setattr(self, prop, entry.default)
class AlignakObject(object): '''This class provides a generic way to instantiate alignak objects. Attributes are serialized dynamically, whether we un-serialize them create them at run / parsing time ''' def __init__(self, params=None, parsing=True): ''' If parsing is True, then the objects are created from an initial configuration read by the Alignak arbiter else the objects are restored from a previously serialized instance sent by the arbiter to another daemon. This function checks the object uuid in the following manner: - in parsing mode, this function simply creates an object uuid - in non parsing mode, this function restore the object attributes from the provided params :param params: initialization parameters :type params: dict :param parsing: configuration parsing phase :type parsing: bool ''' pass def serialize(self, no_json=True, printing=False): '''This function serializes into a simple dictionary object. It is used when transferring data to other daemons over the network (http) Here is the generic function that simply export attributes declared in the properties dictionary of the object. :return: Dictionary containing key and value from properties :rtype: dict ''' pass def fill_default(self): ''' Define the object properties with a default value when the property is not yet defined :return: None ''' pass
4
4
26
5
12
10
5
0.87
1
0
0
9
3
1
3
3
91
19
39
12
35
34
36
12
32
6
1
2
15
3,946
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/action.py
alignak.action.ActionError
class ActionError(Exception): """Exception raised for errors when executing actions Attributes: msg -- explanation of the error """ def __init__(self, msg): super(ActionError, self).__init__() self.message = msg def __str__(self): # pragma: no cover """Exception to String""" return "Action error: %s" % self.message
class ActionError(Exception): '''Exception raised for errors when executing actions Attributes: msg -- explanation of the error ''' def __init__(self, msg): pass def __str__(self): '''Exception to String''' pass
3
2
3
0
3
1
1
1
1
1
0
0
2
1
2
12
14
3
6
4
3
6
6
4
3
1
3
0
2
3,947
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/action.py
alignak.action.ActionBase
class ActionBase(AlignakObject): # pylint: disable=too-many-instance-attributes """ This abstract class is used to have a common base for both actions (event handlers and notifications) and checks. The Action may be on internal one if it does require to use a Worker process to run the action because the Scheduler is able to resolve the action by itseld. This class is specialized according to the running OS. Currently, only Linux/Unix like OSes are tested """ process = None properties = { 'is_a': StringProp(default=u''), 'type': StringProp(default=u''), 'internal': BoolProp(default=False), 'creation_time': FloatProp(default=0.0), '_is_orphan': BoolProp(default=False), '_in_timeout': BoolProp(default=False), 'status': StringProp(default=ACT_STATUS_SCHEDULED), 'exit_status': IntegerProp(default=3), 'output': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'long_output': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'perf_data': StringProp(default=u'', fill_brok=[FULL_STATUS]), 't_to_go': FloatProp(default=0.0), 'check_time': IntegerProp(default=0), 'last_poll': IntegerProp(default=0), 'execution_time': FloatProp(default=0.0), 'wait_time': FloatProp(default=0.001), 'u_time': FloatProp(default=0.0), 's_time': FloatProp(default=0.0), 'reactionner_tag': StringProp(default=u'None'), 'env': DictProp(default={}), 'module_type': StringProp(default=u'fork', fill_brok=[FULL_STATUS]), 'my_worker': StringProp(default=u'none'), 'command': StringProp(default=''), 'timeout': IntegerProp(default=10), 'ref': StringProp(default=u'unset'), 'ref_type': StringProp(default=u'unset'), 'my_scheduler': StringProp(default=u'unassigned'), } def __init__(self, params=None, parsing=False): super(ActionBase, self).__init__(params, parsing=parsing) # Set a creation time only if not provided if not params or 'creation_time' not in params: self.creation_time = time.time() # Set actions log only if not provided if not params or 'log_actions' not in params: self.log_actions = 
'ALIGNAK_LOG_ACTIONS' in os.environ # Fill default parameters self.fill_default() def is_launchable(self, timestamp): """Check if this action can be launched based on current time :param timestamp: time to compare :type timestamp: int :return: True if timestamp >= self.t_to_go, False otherwise :rtype: bool """ if self.t_to_go is None: return False return timestamp >= self.t_to_go def get_local_environnement(self): """ Mix the environment and the environment variables into a new local environment dictionary Note: We cannot just update the global os.environ because this would effect all other checks. :return: local environment variables :rtype: dict """ # Do not use copy.copy() here, as the resulting copy still # changes the real environment (it is still a os._Environment # instance). local_env = os.environ.copy() for local_var in self.env: local_env[local_var] = self.env[local_var] return local_env def execute(self): """Start this action command in a subprocess. :raise: ActionError 'toomanyopenfiles' if too many opened files on the system 'no_process_launched' if arguments parsing failed 'process_launch_failed': if the process launch failed :return: reference to the started process :rtype: psutil.Process """ self.status = ACT_STATUS_LAUNCHED self.check_time = time.time() self.wait_time = 0.0001 self.last_poll = self.check_time # Get a local env variables with our additional values self.local_env = self.get_local_environnement() # Initialize stdout and stderr. self.stdoutdata = '' self.stderrdata = '' logger.debug("Launch command: '%s', ref: %s, timeout: %s", self.command, self.ref, self.timeout) if self.log_actions: if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING': logger.warning("Launch command: '%s'", self.command) else: logger.info("Launch command: '%s'", self.command) return self._execute() # OS specific part def get_outputs(self, out, max_plugins_output_length): """Get check outputs from single output (split perfdata etc). 
Updates output, perf_data and long_output attributes. :param out: output data of a check :type out: str :param max_output: max plugin data length :type max_output: int :return: None """ # Squeeze all output after max_plugins_output_length out = out[:max_plugins_output_length] # manage escaped pipes out = out.replace(r'\|', '___PROTECT_PIPE___') # Then cuts by lines elts = out.split('\n') # For perf data elts_line1 = elts[0].split('|') # First line before | is output, strip it self.output = elts_line1[0].strip().replace('___PROTECT_PIPE___', '|') try: self.output = self.output.decode('utf8', 'ignore') except UnicodeEncodeError: pass except AttributeError: pass # Init perfdata as empty self.perf_data = '' # After | it is perfdata, strip it if len(elts_line1) > 1: self.perf_data = elts_line1[1].strip().replace('___PROTECT_PIPE___', '|') # Now manage others lines. Before the | it's long_output # And after it's all perf_data, \n joined long_output = [] in_perfdata = False for line in elts[1:]: # if already in perfdata, direct append if in_perfdata: self.perf_data += ' ' + line.strip().replace('___PROTECT_PIPE___', '|') else: # not already in perf_data, search for the | part :) elts = line.split('|', 1) # The first part will always be long_output long_output.append(elts[0].strip().replace('___PROTECT_PIPE___', '|')) if len(elts) > 1: in_perfdata = True self.perf_data += ' ' + elts[1].strip().replace('___PROTECT_PIPE___', '|') # long_output is all non output and performance data, joined with \n self.long_output = '\n'.join(long_output) # Get sure the performance data are stripped self.perf_data = self.perf_data.strip() logger.debug("Command result for '%s': %d, %s", self.command, self.exit_status, self.output) if self.log_actions: if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING': logger.warning("Check result for '%s': %d, %s", self.command, self.exit_status, self.output) if self.perf_data: logger.warning("Performance data for '%s': %s", self.command, self.perf_data) 
else: logger.info("Check result for '%s': %d, %s", self.command, self.exit_status, self.output) if self.perf_data: logger.info("Performance data for '%s': %s", self.command, self.perf_data) def check_finished(self, max_plugins_output_length): # pylint: disable=too-many-branches """Handle action if it is finished (get stdout, stderr, exit code...) :param max_plugins_output_length: max plugin data length :type max_plugins_output_length: int :return: None """ self.last_poll = time.time() _, _, child_utime, child_stime, _ = os.times() # Not yet finished... if self.process.poll() is None: # We must wait, but checks are variable in time so we do not wait the same # for a little check or a long ping. So we do like TCP: slow start with a very # shot time (0.0001 s) increased *2 but do not wait more than 0.5 s. self.wait_time = min(self.wait_time * 2, 0.5) now = time.time() # This log is really spamming... uncomment if you really need this information :) # logger.debug("%s - Process pid=%d is still alive", now, self.process.pid) # Get standard outputs in non blocking mode from the process streams stdout = no_block_read(self.process.stdout) stderr = no_block_read(self.process.stderr) try: self.stdoutdata += stdout.decode("utf-8") self.stderrdata += stderr.decode("utf-8") except AttributeError: pass if (now - self.check_time) > self.timeout: logger.warning("Process pid=%d spent too much time: %.2f seconds", self.process.pid, now - self.check_time) self._in_timeout = True self._kill() self.status = ACT_STATUS_TIMEOUT self.execution_time = now - self.check_time self.exit_status = 3 if self.log_actions: if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING': logger.warning("Action '%s' exited on timeout (%d s)", self.command, self.timeout) else: logger.info("Action '%s' exited on timeout (%d s)", self.command, self.timeout) # Do not keep the process objcet del self.process # Replace stdout with stderr if stdout is empty self.stdoutdata = self.stdoutdata.strip() if not 
self.stdoutdata: self.stdoutdata = self.stderrdata # Now grep what we want in the output self.get_outputs(self.stdoutdata, max_plugins_output_length) # We can clean the useless properties now del self.stdoutdata del self.stderrdata # Get the user and system time _, _, n_child_utime, n_child_stime, _ = os.times() self.u_time = n_child_utime - child_utime self.s_time = n_child_stime - child_stime return return logger.debug("Process pid=%d exited with %d", self.process.pid, self.process.returncode) if fcntl: # Get standard outputs in non blocking mode from the process streams stdout = no_block_read(self.process.stdout) stderr = no_block_read(self.process.stderr) else: # Get standard outputs from the communicate function (stdout, stderr) = self.process.communicate() try: self.stdoutdata += stdout.decode("utf-8") except (UnicodeDecodeError, AttributeError): self.stdoutdata += stdout try: self.stderrdata += stderr.decode("utf-8") except (UnicodeDecodeError, AttributeError): self.stderrdata += stderr self.exit_status = self.process.returncode if self.log_actions: if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING': logger.warning("Action '%s' exited with code %d", self.command, self.exit_status) else: logger.info("Action '%s' exited with code %d", self.command, self.exit_status) # We do not need the process now del self.process # check for bad syntax in command line: if (self.stderrdata.find('sh: -c: line 0: unexpected EOF') >= 0 or (self.stderrdata.find('sh: -c: ') >= 0 and self.stderrdata.find(': Syntax') >= 0 or self.stderrdata.find('Syntax error: Unterminated quoted string') >= 0)): logger.warning("Bad syntax in command line!") # Very, very ugly. But subprocess._handle_exitstatus does # not see a difference between a regular "exit 1" and a # bailing out shell. Strange, because strace clearly shows # a difference. (exit_group(1) vs. 
exit_group(257)) self.stdoutdata = self.stdoutdata + self.stderrdata self.exit_status = 3 # Make sure that exit code is a valid exit code if self.exit_status not in VALID_EXIT_STATUS: self.exit_status = 3 # Replace stdout with stderr if stdout is empty self.stdoutdata = self.stdoutdata.strip() if not self.stdoutdata: self.stdoutdata = self.stderrdata # Now grep what we want in the output self.get_outputs(self.stdoutdata, max_plugins_output_length) # We can clean the useless properties now del self.stdoutdata del self.stderrdata self.status = ACT_STATUS_DONE self.execution_time = time.time() - self.check_time # Also get the system and user times _, _, n_child_utime, n_child_stime, _ = os.times() self.u_time = n_child_utime - child_utime self.s_time = n_child_stime - child_stime def copy_shell__(self, new_i): """Create all attributes listed in 'ONLY_COPY_PROP' and return `self` with these attributes. :param new_i: object to :type new_i: object :return: object with new properties added :rtype: object """ for prop in ONLY_COPY_PROP: setattr(new_i, prop, getattr(self, prop)) return new_i def got_shell_characters(self): """Check if the command_attribute (command line) has shell characters Shell characters are : '!', '$', '^', '&', '*', '(', ')', '~', '[', ']', '|', '{', '}', ';', '<', '>', '?', '`' :return: True if one shell character is found, False otherwise :rtype: bool """ return any(c in SHELLCHARS for c in self.command) def _execute(self, force_shell=False): """Execute action in a subprocess :return: None """ pass def _kill(self): """Kill the action and close fds :return: None """ pass
class ActionBase(AlignakObject): ''' This abstract class is used to have a common base for both actions (event handlers and notifications) and checks. The Action may be on internal one if it does require to use a Worker process to run the action because the Scheduler is able to resolve the action by itseld. This class is specialized according to the running OS. Currently, only Linux/Unix like OSes are tested ''' def __init__(self, params=None, parsing=False): pass def is_launchable(self, timestamp): '''Check if this action can be launched based on current time :param timestamp: time to compare :type timestamp: int :return: True if timestamp >= self.t_to_go, False otherwise :rtype: bool ''' pass def get_local_environnement(self): ''' Mix the environment and the environment variables into a new local environment dictionary Note: We cannot just update the global os.environ because this would effect all other checks. :return: local environment variables :rtype: dict ''' pass def execute(self): '''Start this action command in a subprocess. :raise: ActionError 'toomanyopenfiles' if too many opened files on the system 'no_process_launched' if arguments parsing failed 'process_launch_failed': if the process launch failed :return: reference to the started process :rtype: psutil.Process ''' pass def get_outputs(self, out, max_plugins_output_length): '''Get check outputs from single output (split perfdata etc). Updates output, perf_data and long_output attributes. :param out: output data of a check :type out: str :param max_output: max plugin data length :type max_output: int :return: None ''' pass def check_finished(self, max_plugins_output_length): '''Handle action if it is finished (get stdout, stderr, exit code...) :param max_plugins_output_length: max plugin data length :type max_plugins_output_length: int :return: None ''' pass def copy_shell__(self, new_i): '''Create all attributes listed in 'ONLY_COPY_PROP' and return `self` with these attributes. 
:param new_i: object to :type new_i: object :return: object with new properties added :rtype: object ''' pass def got_shell_characters(self): '''Check if the command_attribute (command line) has shell characters Shell characters are : '!', '$', '^', '&', '*', '(', ')', '~', '[', ']', '|', '{', '}', ';', '<', '>', '?', '`' :return: True if one shell character is found, False otherwise :rtype: bool ''' pass def _execute(self, force_shell=False): '''Execute action in a subprocess :return: None ''' pass def _kill(self): '''Kill the action and close fds :return: None ''' pass
11
10
31
5
16
10
4
0.5
1
4
0
2
10
17
10
13
391
61
221
43
210
111
151
43
140
15
2
4
41
3,948
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/acknowledge.py
alignak.acknowledge.Acknowledge
class Acknowledge(AlignakObject): # pylint: disable=R0903 """ Allows you to acknowledge the current problem for the specified service. By acknowledging the current problem, future notifications (for the same service state) are disabled. If the acknowledge is "sticky", the acknowledgement will remain until the service returns to an OK state. Otherwise the acknowledgement will automatically be removed when the service state changes. If the acknowledge is "notify", a notification will be sent out to contacts indicating that the current service problem has been acknowledged and when the acknowledge is cleared. """ my_type = 'acknowledge' properties = { 'sticky': BoolProp(default=True), 'notify': BoolProp(default=False), 'end_time': IntegerProp(default=0), 'author': StringProp(default=u'Alignak'), 'comment': StringProp(default=u''), 'comment_id': StringProp(default=u'') } def __init__(self, params=None, parsing=False): super(Acknowledge, self).__init__(params, parsing=parsing) self.fill_default() def serialize(self, no_json=True, printing=False): """This function serialize into a simple dict object. 
It is used when transferring data to other daemons over the network (http) Here we directly return all attributes :return: json representation of a Acknowledge :rtype: dict """ return {'uuid': self.uuid, 'ref': self.ref, 'sticky': self.sticky, 'notify': self.notify, 'end_time': self.end_time, 'author': self.author, 'comment': self.comment} def get_raise_brok(self, host_name, service_name=''): """Get a start acknowledge brok :param host_name: :param service_name: :return: brok with wanted data :rtype: alignak.brok.Brok """ data = self.serialize() data['host'] = host_name if service_name != '': data['service'] = service_name return Brok({'type': 'acknowledge_raise', 'data': data}) def get_expire_brok(self, host_name, service_name=''): """Get an expire acknowledge brok :type item: item :return: brok with wanted data :rtype: alignak.brok.Brok """ data = self.serialize() data['host'] = host_name if service_name != '': data['service'] = service_name return Brok({'type': 'acknowledge_expire', 'data': data})
class Acknowledge(AlignakObject): ''' Allows you to acknowledge the current problem for the specified service. By acknowledging the current problem, future notifications (for the same service state) are disabled. If the acknowledge is "sticky", the acknowledgement will remain until the service returns to an OK state. Otherwise the acknowledgement will automatically be removed when the service state changes. If the acknowledge is "notify", a notification will be sent out to contacts indicating that the current service problem has been acknowledged and when the acknowledge is cleared. ''' def __init__(self, params=None, parsing=False): pass def serialize(self, no_json=True, printing=False): '''This function serialize into a simple dict object. It is used when transferring data to other daemons over the network (http) Here we directly return all attributes :return: json representation of a Acknowledge :rtype: dict ''' pass def get_raise_brok(self, host_name, service_name=''): '''Get a start acknowledge brok :param host_name: :param service_name: :return: brok with wanted data :rtype: alignak.brok.Brok ''' pass def get_expire_brok(self, host_name, service_name=''): '''Get an expire acknowledge brok :type item: item :return: brok with wanted data :rtype: alignak.brok.Brok ''' pass
5
4
11
2
5
4
2
0.85
1
2
1
0
4
0
4
7
76
14
34
9
29
29
20
9
15
2
2
1
6
3,949
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/alignak/http/arbiter_interface.py
alignak.http.arbiter_interface.ArbiterInterface
class ArbiterInterface(GenericInterface): """This module provide a specific HTTP interface for an Arbiter daemon.""" ##### # _____ _ # | ____| __ __ _ __ ___ ___ ___ __| | # | _| \ \/ / | '_ \ / _ \ / __| / _ \ / _` | # | |___ > < | |_) | | (_) | \__ \ | __/ | (_| | # |_____| /_/\_\ | .__/ \___/ |___/ \___| \__,_| # |_| ##### @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def reload_configuration(self): """Ask to the arbiter to reload the monitored configuration **Note** tha the arbiter will not reload its main configuration file (eg. alignak.ini) but it will reload the monitored objects from the Nagios legacy files or from the Alignak backend! In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error :return: True if configuration reload is accepted """ # If I'm not the master arbiter, ignore the command and raise a log if not self.app.is_master: message = u"I received a request to reload the monitored configuration. " \ u"I am not the Master arbiter, I ignore and continue to run." logger.warning(message) return {'_status': u'ERR', '_message': message} message = "I received a request to reload the monitored configuration" if self.app.loading_configuration: message = message + "and I am still reloading the monitored configuration ;)" else: self.app.need_config_reload = True logger.warning(message) return {'_status': u'OK', '_message': message} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def backend_notification(self, event=None, parameters=None): """The Alignak backend raises an event to the Alignak arbiter ----- Possible events are: - creation, for a realm or an host creation - deletion, for a realm or an host deletion Calls the reload configuration function if event is creation or deletion Else, nothing for the moment! 
In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error The `_status` field is 'OK' with an according `_message` to explain what the Arbiter will do depending upon the notification. :return: dict """ # request_parameters = cherrypy.request.json # event = request_parameters.get('event', event) # parameters = request_parameters.get('parameters', parameters) if event is None: data = cherrypy.request.json event = data.get('event', None) if parameters is None: data = cherrypy.request.json parameters = data.get('parameters', None) logger.warning("I got a backend notification: %s / %s", event, parameters) # For a configuration reload event... if event in ['creation', 'deletion']: # If I'm the master, ignore the command and raise a log if not self.app.is_master: message = u"I received a request to reload the monitored configuration. " \ u"I am not the Master arbiter, I ignore and continue to run." logger.warning(message) return {'_status': u'ERR', '_message': message} message = "I received a request to reload the monitored configuration." if self.app.loading_configuration: message += "I am still reloading the monitored configuration ;)" logger.warning(message) self.app.need_config_reload = True return {'_status': u'OK', '_message': message} return {'_status': u'OK', '_message': u"No action to do"} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def command(self, command=None, timestamp=None, element=None, host=None, service=None, user=None, parameters=None): # pylint: disable=too-many-branches """ Request to execute an external command Allowed parameters are: `command`: mandatory parameter containing the whole command line or only the command name `timestamp`: optional parameter containing the timestamp. 
If not present, the current timestamp is added in the command line `element`: the targeted element that will be appended after the command name (`command`). If element contains a '/' character it is split to make an host and service. `host`, `service` or `user`: the targeted host, service or user. Takes precedence over the `element` to target a specific element `parameters`: the parameter that will be appended after all the arguments When using this endpoint with the HTTP GET method, the semi colons that are commonly used to separate the parameters must be replace with %3B! This because the ; is an accepted URL query parameters separator... Indeed, the recommended way of using this endpoint is to use the HTTP POST method. In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error The `_status` field is 'OK' with an according `_message` to explain what the Arbiter will do depending upon the notification. The `command` property contains the formatted external command. 
:return: dict """ if cherrypy.request.method in ["POST"]: if not cherrypy.request.json: return {'_status': u'ERR', '_message': u'You must POST parameters on this endpoint.'} if command is None: try: command = cherrypy.request.json.get('command', None) timestamp = cherrypy.request.json.get('timestamp', None) element = cherrypy.request.json.get('element', None) host = cherrypy.request.json.get('host', None) service = cherrypy.request.json.get('service', None) user = cherrypy.request.json.get('user', None) parameters = cherrypy.request.json.get('parameters', None) except AttributeError: return {'_status': u'ERR', '_message': u'Missing command parameters'} if not command: return {'_status': u'ERR', '_message': u'Missing command parameter'} fields = split_semicolon(command) command_line = command.replace(fields[0], fields[0].upper()) if timestamp: try: timestamp = int(timestamp) except ValueError: return {'_status': u'ERR', '_message': u'Timestamp must be an integer value'} command_line = '[%d] %s' % (timestamp, command_line) if host or service or user: if host: command_line = '%s;%s' % (command_line, host) if service: command_line = '%s;%s' % (command_line, service) if user: command_line = '%s;%s' % (command_line, user) elif element: if '/' in element: # Replace only the first / element = element.replace('/', ';', 1) command_line = '%s;%s' % (command_line, element) if parameters: command_line = '%s;%s' % (command_line, parameters) # Add a command to get managed logger.warning("Got an external command: %s", command_line) self.app.add(ExternalCommand(command_line)) return {'_status': u'OK', '_message': u"Got command: %s" % command_line, 'command': command_line} @cherrypy.expose @cherrypy.tools.json_out() def problems(self): """Alias for monitoring_problems""" return self.monitoring_problems @cherrypy.expose @cherrypy.tools.json_out() def monitoring_problems(self): """Get Alignak detailed monitoring status This will return an object containing the properties of the 
`identity`, plus a `problems` object which contains 2 properties for each known scheduler: - _freshness, which is the timestamp when the provided data were fetched - problems, which is an object with the scheduler known problems: { ... "problems": { "scheduler-master": { "_freshness": 1528903945, "problems": { "fdfc986d-4ab4-4562-9d2f-4346832745e6": { "last_state": "CRITICAL", "service": "dummy_critical", "last_state_type": "SOFT", "last_state_update": 1528902442, "last_hard_state": "CRITICAL", "last_hard_state_change": 1528902442, "last_state_change": 1528902381, "state": "CRITICAL", "state_type": "HARD", "host": "host-all-8", "output": "Hi, checking host-all-8/dummy_critical -> exit=2" }, "2445f2a3-2a3b-4b13-96ed-4cfb60790e7e": { "last_state": "WARNING", "service": "dummy_warning", "last_state_type": "SOFT", "last_state_update": 1528902463, "last_hard_state": "WARNING", "last_hard_state_change": 1528902463, "last_state_change": 1528902400, "state": "WARNING", "state_type": "HARD", "host": "host-all-6", "output": "Hi, checking host-all-6/dummy_warning -> exit=1" }, ... 
} } } } :return: schedulers live synthesis list :rtype: dict """ res = self.identity() res['problems'] = {} for scheduler_link in self.app.conf.schedulers: sched_res = scheduler_link.con.get( 'monitoring_problems', wait=True) res['problems'][scheduler_link.name] = {} if '_freshness' in sched_res: res['problems'][scheduler_link.name].update( {'_freshness': sched_res['_freshness']}) if 'problems' in sched_res: res['problems'][scheduler_link.name].update( {'problems': sched_res['problems']}) res['_freshness'] = int(time.time()) return res @cherrypy.expose @cherrypy.tools.json_out() def livesynthesis(self): """Get Alignak live synthesis This will return an object containing the properties of the `identity`, plus a `livesynthesis` object which contains 2 properties for each known scheduler: - _freshness, which is the timestamp when the provided data were fetched - livesynthesis, which is an object with the scheduler live synthesis. An `_overall` fake scheduler is also contained in the schedulers list to provide the cumulated live synthesis. Before sending the results, the arbiter sums-up all its schedulers live synthesis counters in the `_overall` live synthesis. { ... 
"livesynthesis": { "_overall": { "_freshness": 1528947526, "livesynthesis": { "hosts_total": 11, "hosts_not_monitored": 0, "hosts_up_hard": 11, "hosts_up_soft": 0, "hosts_down_hard": 0, "hosts_down_soft": 0, "hosts_unreachable_hard": 0, "hosts_unreachable_soft": 0, "hosts_flapping": 0, "hosts_problems": 0, "hosts_acknowledged": 0, "hosts_in_downtime": 0, "services_total": 100, "services_not_monitored": 0, "services_ok_hard": 70, "services_ok_soft": 0, "services_warning_hard": 4, "services_warning_soft": 6, "services_critical_hard": 6, "services_critical_soft": 4, "services_unknown_hard": 3, "services_unknown_soft": 7, "services_unreachable_hard": 0, "services_unreachable_soft": 0, "services_flapping": 0, "services_problems": 0, "services_acknowledged": 0, "services_in_downtime": 0 } } }, "scheduler-master": { "_freshness": 1528947522, "livesynthesis": { "hosts_total": 11, "hosts_not_monitored": 0, "hosts_up_hard": 11, "hosts_up_soft": 0, "hosts_down_hard": 0, "hosts_down_soft": 0, "hosts_unreachable_hard": 0, "hosts_unreachable_soft": 0, "hosts_flapping": 0, "hosts_problems": 0, "hosts_acknowledged": 0, "hosts_in_downtime": 0, "services_total": 100, "services_not_monitored": 0, "services_ok_hard": 70, "services_ok_soft": 0, "services_warning_hard": 4, "services_warning_soft": 6, "services_critical_hard": 6, "services_critical_soft": 4, "services_unknown_hard": 3, "services_unknown_soft": 7, "services_unreachable_hard": 0, "services_unreachable_soft": 0, "services_flapping": 0, "services_problems": 0, "services_acknowledged": 0, "services_in_downtime": 0 } } } } } :return: scheduler live synthesis :rtype: dict """ res = self.identity() res.update(self.app.get_livesynthesis()) return res @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def object(self, o_type, o_name=None): """Get a monitored object from the arbiter. Indeed, the arbiter requires the object from its schedulers. 
It will iterate in its schedulers list until a matching object is found. Else it will return a Json structure containing _status and _message properties. When found, the result is a serialized object which is a Json structure containing: - content: the serialized object content - __sys_python_module__: the python class of the returned object The Alignak unserialize function of the alignak.misc.serialization package allows to restore the initial object. .. code-block:: python from alignak.misc.serialization import unserialize from alignak.objects.hostgroup import Hostgroup raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts") print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() group = unserialize(object, True) assert group.__class__ == Hostgroup assert group.get_name() == 'allhosts' As an example: { "__sys_python_module__": "alignak.objects.hostgroup.Hostgroup", "content": { "uuid": "32248642-97dd-4f39-aaa2-5120112a765d", "name": "", "hostgroup_name": "allhosts", "use": [], "tags": [], "alias": "All Hosts", "notes": "", "definition_order": 100, "register": true, "unknown_members": [], "notes_url": "", "action_url": "", "imported_from": "unknown", "conf_is_correct": true, "configuration_errors": [], "configuration_warnings": [], "realm": "", "downtimes": {}, "hostgroup_members": [], "members": [ "553d47bc-27aa-426c-a664-49c4c0c4a249", "f88093ca-e61b-43ff-a41e-613f7ad2cea2", "df1e2e13-552d-43de-ad2a-fe80ad4ba979", "d3d667dd-f583-4668-9f44-22ef3dcb53ad" ] } } :param o_type: searched object type :type o_type: str :param o_name: searched object name (or uuid) :type o_name: str :return: serialized object information :rtype: str """ for scheduler_link in self.app.conf.schedulers: sched_res = scheduler_link.con.get('object', {'o_type': o_type, 'o_name': o_name}, wait=True) if isinstance(sched_res, dict) and 'content' in sched_res: return sched_res return {'_status': u'ERR', '_message': 
u'Required %s not found.' % o_type} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def dump(self, o_name=None, details=False, raw=False): """Dump an host (all hosts) from the arbiter. The arbiter will get the host (all hosts) information from all its schedulers. This gets the main host information from the scheduler. If details is set, then some more information are provided. This will not get all the host known attributes but only a reduced set that will inform about the host and its services status If raw is set the information are provided in two string lists formated as CSV strings. The first list element contains the hosts information and the second one contains the services information. If an host name is provided, this function will get only this host information, else all the scheduler hosts are returned. As an example (in raw format): { scheduler-master-3: [ [ "type;host;name;last_check;state_id;state;state_type;is_problem; is_impact;output", "localhost;host;localhost;1532451740;0;UP;HARD;False;False; Host assumed to be UP", "host_2;host;host_2;1532451988;1;DOWN;HARD;True;False;I am always Down" ], [ "type;host;name", "host_2;service;dummy_no_output;1532451981;0;OK;HARD;False;True; Service internal check result: 0", "host_2;service;dummy_warning;1532451960;4;UNREACHABLE;HARD;False;True; host_2-dummy_warning-1", "host_2;service;dummy_unreachable;1532451987;4;UNREACHABLE;HARD;False;True; host_2-dummy_unreachable-4", "host_2;service;dummy_random;1532451949;4;UNREACHABLE;HARD;False;True; Service internal check result: 2", "host_2;service;dummy_ok;1532452002;0;OK;HARD;False;True;host_2", "host_2;service;dummy_critical;1532451953;4;UNREACHABLE;HARD;False;True; host_2-dummy_critical-2", "host_2;service;dummy_unknown;1532451945;4;UNREACHABLE;HARD;False;True; host_2-dummy_unknown-3", "host_2;service;dummy_echo;1532451973;4;UNREACHABLE;HARD;False;True;" ] ], scheduler-master-2: [ [ 
"type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output", "host_0;host;host_0;1532451993;0;UP;HARD;False;False;I am always Up", "BR_host;host;BR_host;1532451991;0;UP;HARD;False;False;Host assumed to be UP" ], [ "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output", "host_0;service;dummy_no_output;1532451970;0;OK;HARD;False;False; Service internal check result: 0", "host_0;service;dummy_unknown;1532451964;3;UNKNOWN;HARD;True;False; host_0-dummy_unknown-3", "host_0;service;dummy_random;1532451991;1;WARNING;HARD;True;False; Service internal check result: 1", "host_0;service;dummy_warning;1532451945;1;WARNING;HARD;True;False; host_0-dummy_warning-1", "host_0;service;dummy_unreachable;1532451986;4;UNREACHABLE;HARD;True;False; host_0-dummy_unreachable-4", "host_0;service;dummy_ok;1532452012;0;OK;HARD;False;False;host_0", "host_0;service;dummy_critical;1532451987;2;CRITICAL;HARD;True;False; host_0-dummy_critical-2", "host_0;service;dummy_echo;1532451963;0;OK;HARD;False;False;", "BR_host;service;dummy_critical;1532451970;2;CRITICAL;HARD;True;False; BR_host-dummy_critical-2", "BR_host;service;BR_Simple_And;1532451895;1;WARNING;HARD;True;True;", "BR_host;service;dummy_unreachable;1532451981;4;UNREACHABLE;HARD;True;False; BR_host-dummy_unreachable-4", "BR_host;service;dummy_no_output;1532451975;0;OK;HARD;False;False; Service internal check result: 0", "BR_host;service;dummy_unknown;1532451955;3;UNKNOWN;HARD;True;False; BR_host-dummy_unknown-3", "BR_host;service;dummy_echo;1532451981;0;OK;HARD;False;False;", "BR_host;service;dummy_warning;1532451972;1;WARNING;HARD;True;False; BR_host-dummy_warning-1", "BR_host;service;dummy_random;1532451976;4;UNREACHABLE;HARD;True;False; Service internal check result: 4", "BR_host;service;dummy_ok;1532451972;0;OK;HARD;False;False;BR_host" ] ], ... More information are available in the scheduler corresponding API endpoint. 
:param o_type: searched object type :type o_type: str :param o_name: searched object name (or uuid) :type o_name: str :return: serialized object information :rtype: str """ if details is not False: details = bool(details) if raw is not False: raw = bool(raw) res = {} for scheduler_link in self.app.conf.schedulers: sched_res = scheduler_link.con.get('dump', {'o_name': o_name, 'details': '1' if details else '', 'raw': '1' if raw else ''}, wait=True) if isinstance(sched_res, dict) and \ '_status' in sched_res and sched_res['_status'] == 'ERR': continue res[scheduler_link.name] = sched_res return res @cherrypy.expose @cherrypy.tools.json_out() def status(self, details=False): """Get the overall alignak status Returns a list of the satellites as in: { services: [ { livestate: { perf_data: "", timestamp: 1532106561, state: "ok", long_output: "", output: "all daemons are up and running." }, name: "arbiter-master" }, { livestate: { name: "poller_poller-master", timestamp: 1532106561, long_output: "Realm: (True). Listening on: http://127.0.0.1:7771/", state: "ok", output: "daemon is alive and reachable.", perf_data: "last_check=1532106560.17" }, name: "poller-master" }, ... ... ], variables: { }, livestate: { timestamp: 1532106561, long_output: "broker-master - daemon is alive and reachable. poller-master - daemon is alive and reachable. reactionner-master - daemon is alive and reachable. receiver-master - daemon is alive and reachable. receiver-nsca - daemon is alive and reachable. scheduler-master - daemon is alive and reachable. scheduler-master-2 - daemon is alive and reachable. 
scheduler-master-3 - daemon is alive and reachable.", state: "up", output: "All my daemons are up and running.", perf_data: " 'servicesextinfo'=0 'businessimpactmodulations'=0 'hostgroups'=2 'resultmodulations'=0 'escalations'=0 'schedulers'=3 'hostsextinfo'=0 'contacts'=2 'servicedependencies'=0 'servicegroups'=1 'pollers'=1 'arbiters'=1 'receivers'=2 'macromodulations'=0 'reactionners'=1 'contactgroups'=2 'brokers'=1 'realms'=3 'services'=32 'commands'=11 'notificationways'=2 'timeperiods'=4 'modules'=0 'checkmodulations'=0 'hosts'=6 'hostdependencies'=0" }, name: "My Alignak", template: { notes: "", alias: "My Alignak", _templates: [ "alignak", "important" ], active_checks_enabled: false, passive_checks_enabled: true } } :param details: Details are required (different from 0) :type details bool :return: dict with key *daemon_type* and value list of daemon name :rtype: dict """ if details is not False: details = bool(details) return self.app.get_alignak_status(details=details) @cherrypy.expose @cherrypy.tools.json_out() def events_log(self, details=False, count=0, timestamp=0): """Get the most recent Alignak events If count is specifies it is the maximum number of events to return. If timestamp is specified, events older than this timestamp will not be returned The arbiter maintains a list of the most recent Alignak events. This endpoint provides this list. 
The default format is: [ "2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: guest;host_0;dummy_random;CRITICAL;1; notify-service-by-log;Service internal check result: 2", "2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: admin;host_0;dummy_random;CRITICAL;1; notify-service-by-log;Service internal check result: 2", "2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_critical;CRITICAL;SOFT;1; host_0-dummy_critical-2", "2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_random;CRITICAL;HARD;2; Service internal check result: 2", "2018-07-23 15:14:42 - I - SERVICE ALERT: host_0;dummy_unknown;UNKNOWN;HARD;2; host_0-dummy_unknown-3" ] If you request on this endpoint with the *details* parameter (whatever its value...), you will get a detailed JSON output: [ { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:35", message: "SERVICE ALERT: host_11;dummy_echo;UNREACHABLE;HARD;2;", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE NOTIFICATION: guest;host_0;dummy_random;OK;0; notify-service-by-log;Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE NOTIFICATION: admin;host_0;dummy_random;OK;0; notify-service-by-log;Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE ALERT: host_0;dummy_random;OK;HARD;2; Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:19", message: "SERVICE ALERT: host_11;dummy_random;OK;HARD;2; Service internal check result: 0", level: "info" } ] In this example, only the 5 most recent events are provided whereas the default value is to provide the 100 last events. This default counter may be changed thanks to the ``events_log_count`` configuration variable or ``ALIGNAK_EVENTS_LOG_COUNT`` environment variable. 
The date format may also be changed thanks to the ``events_date_format`` configuration variable. :return: list of the most recent events :rtype: list """ if not count: count = 1 + int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT', self.app.conf.events_log_count)) count = int(count) timestamp = float(timestamp) logger.debug('Get max %d events, newer than %s out of %d', count, timestamp, len(self.app.recent_events)) res = [] for log in reversed(self.app.recent_events): if timestamp and timestamp > log['timestamp']: break if not count: break if details: # Exposes the full object res.append(log) else: res.append("%s - %s - %s" % (log['date'], log['level'][0].upper(), log['message'])) logger.debug('Got %d events', len(res)) return res @cherrypy.expose @cherrypy.tools.json_out() def satellites_list(self, daemon_type=''): """Get the arbiter satellite names sorted by type Returns a list of the satellites as in: { reactionner: [ "reactionner-master" ], broker: [ "broker-master" ], arbiter: [ "arbiter-master" ], scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ], receiver: [ "receiver-nsca", "receiver-master" ], poller: [ "poller-master" ] } If a specific daemon type is requested, the list is reduced to this unique daemon type: { scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ] } :param daemon_type: daemon type to filter :type daemon_type: str :return: dict with key *daemon_type* and value list of daemon name :rtype: dict """ with self.app.conf_lock: res = {} for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver', 'broker']: if daemon_type and daemon_type != s_type: continue satellite_list = [] res[s_type] = satellite_list for daemon_link in getattr(self.app.conf, s_type + 's', []): satellite_list.append(daemon_link.name) return res @cherrypy.expose @cherrypy.tools.json_out() def realms(self, details=False): """Return the realms / satellites configuration Returns an object containing the hierarchical 
realms configuration with the main information about each realm: { All: { satellites: { pollers: [ "poller-master" ], reactionners: [ "reactionner-master" ], schedulers: [ "scheduler-master", "scheduler-master-3", "scheduler-master-2" ], brokers: [ "broker-master" ], receivers: [ "receiver-master", "receiver-nsca" ] }, children: { }, name: "All", members: [ "host_1", "host_0", "host_3", "host_2", "host_11", "localhost" ], level: 0 }, North: { ... } } Sub realms defined inside a realm are provided in the `children` property of their parent realm and they contain the same information as their parent.. The `members` realm contain the list of the hosts members of the realm. If ``details`` is required, each realm will contain more information about each satellite involved in the realm management: { All: { satellites: { pollers: [ { passive: false, name: "poller-master", livestate_output: "poller/poller-master is up and running.", reachable: true, uri: "http://127.0.0.1:7771/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.593074, type: "poller" } ], reactionners: [ { passive: false, name: "reactionner-master", livestate_output: "reactionner/reactionner-master is up and running.", reachable: true, uri: "http://127.0.0.1:7769/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.587762, type: "reactionner" } ] :return: dict containing realms / satellites :rtype: dict """ def get_realm_info(realm, realms, satellites, details=False): """Get the realm and its children information :return: None """ res = { "name": realm.get_name(), "level": realm.level, "hosts": realm.members, "hostgroups": realm.group_members, "children": {}, "satellites": { } } for child in realm.realm_members: child = 
realms.find_by_name(child) if not child: continue realm_infos = get_realm_info( child, realms, satellites, details=details) res['children'][child.get_name()] = realm_infos for sat_type in ['scheduler', 'reactionner', 'broker', 'receiver', 'poller']: res["satellites"][sat_type + 's'] = [] sats = realm.get_potential_satellites_by_type( satellites, sat_type) for sat in sats: if details: res["satellites"][sat_type + 's'][sat.name] = sat.give_satellite_json() else: res["satellites"][sat_type + 's'].append(sat.name) return res if details is not False: details = bool(details) # Report our daemons states, but only if a dispatcher and the configuration is loaded if not getattr(self.app, 'dispatcher', None) or not getattr(self.app, 'conf', None): return {'_status': u'ERR', '_message': "Not yet available. Please come back later."} res = {} higher_realms = [ realm for realm in self.app.conf.realms if realm.level == 0] for realm in higher_realms: res[realm.get_name()] = get_realm_info(realm, self.app.conf.realms, self.app.dispatcher.all_daemons_links) return res @cherrypy.expose @cherrypy.tools.json_out() def system(self, details=False): """Return the realms / satellites configuration Returns an object containing the hierarchical realms configuration with the main information about each realm: { All: { satellites: { pollers: [ "poller-master" ], reactionners: [ "reactionner-master" ], schedulers: [ "scheduler-master", "scheduler-master-3", "scheduler-master-2" ], brokers: [ "broker-master" ], receivers: [ "receiver-master", "receiver-nsca" ] }, children: { }, name: "All", members: [ "host_1", "host_0", "host_3", "host_2", "host_11", "localhost" ], level: 0 }, North: { ... } } Sub realms defined inside a realm are provided in the `children` property of their parent realm and they contain the same information as their parent.. The `members` realm contain the list of the hosts members of the realm. 
If ``details`` is required, each realm will contain more information about each satellite involved in the realm management: { All: { satellites: { pollers: [ { passive: false, name: "poller-master", livestate_output: "poller/poller-master is up and running.", reachable: true, uri: "http://127.0.0.1:7771/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.593074, type: "poller" } ], reactionners: [ { passive: false, name: "reactionner-master", livestate_output: "reactionner/reactionner-master is up and running.", reachable: true, uri: "http://127.0.0.1:7769/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.587762, type: "reactionner" } ] :return: dict containing realms / satellites :rtype: dict """ def get_realm_info(realm, realms, satellites, details=False): """Get the realm and its children information :return: None """ res = { "name": realm.get_name(), "level": realm.level, "hosts": realm.members, "groups": realm.group_members, "children": {}, "satellites": { } } for child in realm.realm_members: child = realms.find_by_name(child) if not child: continue realm_infos = get_realm_info( child, realms, satellites, details=details) res['children'][child.get_name()] = realm_infos for sat_type in ['scheduler', 'reactionner', 'broker', 'receiver', 'poller']: res["satellites"][sat_type + 's'] = [] sats = realm.get_potential_satellites_by_type( satellites, sat_type) for sat in sats: if details: res["satellites"][sat_type + 's'][sat.name] = sat.give_satellite_json() else: res["satellites"][sat_type + 's'].append(sat.name) return res if details is not False: details = bool(details) # Report our daemons states, but only if a dispatcher and the configuration is loaded if not getattr(self.app, 
'dispatcher', None) or not getattr(self.app, 'conf', None): return {'_status': u'ERR', '_message': "Not yet available. Please come back later."} res = {} higher_realms = [ realm for realm in self.app.conf.realms if realm.level == 0] for realm in higher_realms: res[realm.get_name()] = get_realm_info(realm, self.app.conf.realms, self.app.dispatcher.all_daemons_links) return res @cherrypy.expose @cherrypy.tools.json_out() def satellites_configuration(self): """Return all the configuration data of satellites :return: dict containing satellites data Output looks like this :: {'arbiter' : [{'property1':'value1' ..}, {'property2', 'value11' ..}, ..], 'scheduler': [..], 'poller': [..], 'reactionner': [..], 'receiver': [..], 'broker: [..]' } :rtype: dict """ res = {} for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver', 'broker']: lst = [] res[s_type] = lst for daemon in getattr(self.app.conf, s_type + 's'): cls = daemon.__class__ env = {} all_props = [cls.properties, cls.running_properties] for props in all_props: for prop in props: if not hasattr(daemon, prop): continue if prop in ["realms", "conf", "con", "tags", "modules", "cfg", "broks", "cfg_to_manage"]: continue val = getattr(daemon, prop) # give a try to a json able object try: json.dumps(val) env[prop] = val except TypeError as exp: logger.warning( 'satellites_configuration, %s: %s', prop, str(exp)) lst.append(env) return res @cherrypy.expose @cherrypy.tools.json_out() def external_commands(self): """Get the external commands from the daemon Use a lock for this function to protect :return: serialized external command list :rtype: str """ res = [] with self.app.external_commands_lock: for cmd in self.app.get_external_commands(): res.append(cmd.serialize()) return res ##### # ____ __ # / ___| _ __ __ _ / _| __ _ _ __ __ _ # | | _ | '__| / _` | | |_ / _` | | '_ \ / _` | # | |_| | | | | (_| | | _| | (_| | | | | | | (_| | # \____| |_| \__,_| |_| \__,_| |_| |_| \__,_| # ##### @cherrypy.expose 
@cherrypy.tools.json_in() @cherrypy.tools.json_out() def search(self): # pylint: disable=no-self-use """ Request available queries Posted data: {u'target': u''} Return the list of available target queries :return: See upper comment :rtype: list """ logger.debug("Grafana search... %s", cherrypy.request.method) if cherrypy.request.method == 'OPTIONS': cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET,POST,PATCH,PUT,DELETE' cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization' cherrypy.response.headers['Access-Control-Allow-Origin'] = '*' cherrypy.request.handler = None return {} if getattr(cherrypy.request, 'json', None): logger.debug("Posted data: %s", cherrypy.request.json) logger.debug("Grafana search returns: %s", GRAFANA_TARGETS) return GRAFANA_TARGETS @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def query(self): """ Request object passed to datasource.query function: { 'timezone': 'browser', 'panelId': 38, 'range': { 'from': '2018-08-29T02:38:09.633Z', 'to': '2018-08-29T03:38:09.633Z', 'raw': {'from': 'now-1h', 'to': 'now'} }, 'rangeRaw': {'from': 'now-1h', 'to': 'now'}, 'interval': '10s', 'intervalMs': 10000, 'targets': [ { 'target': 'problems', 'refId': 'A', 'type': 'table'} ], 'format': 'json', 'maxDataPoints': 314, 'scopedVars': { '__interval': {'text': '10s', 'value': '10s'}, '__interval_ms': {'text': 10000, 'value': 10000} } } Only the first target is considered. If several targets are required, an error is raised. The target is a string that is searched in the target_queries dictionary. If found the corresponding query is executed and the result is returned. Table response from datasource.query. 
An array of: [ { "type": "table", "columns": [ { "text": "Time", "type": "time", "sort": true, "desc": true, }, { "text": "mean", }, { "text": "sum", } ], "rows": [ [ 1457425380000, null, null ], [ 1457425370000, 1002.76215352, 1002.76215352 ], ] } ] :return: See upper comment :rtype: list """ logger.debug("Grafana query... %s", cherrypy.request.method) if cherrypy.request.method == 'OPTIONS': cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET,POST,PATCH,PUT,DELETE' cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization' cherrypy.response.headers['Access-Control-Allow-Origin'] = '*' cherrypy.request.handler = None return {} if getattr(cherrypy.request, 'json', None): posted_data = cherrypy.request.json logger.debug("Posted data: %s", cherrypy.request.json) targets = None target = None try: targets = posted_data.get("targets") assert targets assert len(targets) == 1 target = targets[0].get("target") except Exception as exp: # pylint: disable=broad-except cherrypy.response.status = 409 return {'_status': u'ERR', '_message': u'Request error: %s.' 
% exp} resp = [] if target in ['events_log']: resp = [{ "type": "table", "columns": [ { "text": "Time", "type": "time", "sort": True, "desc": True }, { "text": "Severity", "type": "integer" }, { "text": "Message", "type": "string" } ], "rows": [] }] severity = { "info": 0, 'warning': 1, 'error': 2, 'critical': 3 } for log in reversed(self.app.recent_events): # 0 for the first required target # timestamp must be precise on ms for Grafana resp[0]['rows'].append([log['timestamp'] * 1000, severity.get(log['level'].lower(), 3), log['message']]) if target in ['problems_log']: resp = [{ "type": "table", "columns": [ { "text": "Raised", "type": "time", "sort": True, "desc": True }, { "text": "Severity", "type": "integer" }, { "text": "Host", "type": "string" }, { "text": "Service", "type": "string" }, { "text": "State", "type": "integer" }, { "text": "Output", "type": "string" } ], "rows": [] }] severity = { "up": 0, 'down': 2, 'ok': 0, 'warning': 1, 'critical': 2 } problems = {} for scheduler_link in self.app.conf.schedulers: sched_res = scheduler_link.con.get( 'monitoring_problems', wait=True) if 'problems' in sched_res: problems.update(sched_res['problems']) # todo: add a sorting for problem_uuid in problems: log = problems[problem_uuid] # 0 for the first required target resp[0]['rows'].append([log['last_hard_state_change'] * 1000, severity.get(log['state'].lower(), 3), log['host'], log['service'], log['state'], log['output']]) return resp ##### # _ _ _ _ # / \ | | (_) __ _ _ __ __ _ | | __ # / _ \ | | | | / _` | | '_ \ / _` | | |/ / # / ___ \ | | | | | (_| | | | | | | (_| | | < # /_/ \_\ |_| |_| \__, | |_| |_| \__,_| |_|\_\ # |___/ ##### def _build_host_livestate(self, host_name, livestate): # pylint: disable=no-self-use, too-many-locals """Build and notify the external command for an host livestate PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output> :param host_name: the concerned host name :param livestate: livestate dictionary :return: external 
command line """ state = livestate.get('state', 'UP').upper() output = livestate.get('output', '') long_output = livestate.get('long_output', '') perf_data = livestate.get('perf_data', '') try: timestamp = int(livestate.get('timestamp', 'ABC')) except ValueError: timestamp = None host_state_to_id = { "UP": 0, "DOWN": 1, "UNREACHABLE": 2 } parameters = '%s;%s' % (host_state_to_id.get(state, 3), output) if long_output and perf_data: parameters = '%s|%s\n%s' % (parameters, perf_data, long_output) elif long_output: parameters = '%s\n%s' % (parameters, long_output) elif perf_data: parameters = '%s|%s' % (parameters, perf_data) command_line = 'PROCESS_HOST_CHECK_RESULT;%s;%s' % ( host_name, parameters) if timestamp is not None: command_line = '[%d] %s' % (timestamp, command_line) else: command_line = '[%d] %s' % (int(time.time()), command_line) return command_line def _build_service_livestate(self, host_name, service_name, livestate): # pylint: disable=no-self-use, too-many-locals """Build and notify the external command for a service livestate PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output> Create and post a logcheckresult to the backend for the livestate :param host_name: the concerned host name :param service_name: the concerned service name :param livestate: livestate dictionary :return: external command line """ state = livestate.get('state', 'OK').upper() output = livestate.get('output', '') long_output = livestate.get('long_output', '') perf_data = livestate.get('perf_data', '') try: timestamp = int(livestate.get('timestamp', 'ABC')) except ValueError: timestamp = None service_state_to_id = { "OK": 0, "WARNING": 1, "CRITICAL": 2, "UNKNOWN": 3, "UNREACHABLE": 4 } parameters = '%s;%s' % (service_state_to_id.get(state, 3), output) if long_output and perf_data: parameters = '%s|%s\n%s' % (parameters, perf_data, long_output) elif long_output: parameters = '%s\n%s' % (parameters, long_output) elif perf_data: parameters = 
'%s|%s' % (parameters, perf_data) command_line = 'PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s' % \ (host_name, service_name, parameters) if timestamp is not None: command_line = '[%d] %s' % (timestamp, command_line) else: command_line = '[%d] %s' % (int(time.time()), command_line) return command_line @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def host(self): # pylint: disable=too-many-branches """Get a passive checks for an host and its services This function builds the external commands corresponding to the host and services provided information :param host_name: host name :param data: dictionary of the host properties to be modified :return: command line """ logger.debug("Host status...") if cherrypy.request.method not in ["PATCH", "POST"]: cherrypy.response.status = 405 return {'_status': 'ERR', '_error': 'You must only PATCH or POST on this endpoint.'} # Update an host # --- if not cherrypy.request.json: return {'_status': 'ERR', '_error': 'You must send parameters on this endpoint.'} host_name = None if cherrypy.request.json.get('name', None) is not None: host_name = cherrypy.request.json.get('name', None) if not host_name: return {'_status': 'ERR', '_error': 'Missing targeted host name.'} # Get provided data # --- logger.debug("Posted data: %s", cherrypy.request.json) # Check if the host exist in Alignak # --- # todo: Not mandatory but it would be clean... # Prepare response # --- ws_result = {'_status': 'OK', '_result': ['%s is alive :)' % host_name], '_issues': []} # Manage the host livestate # --- # Alert on unordered livestate if several information exist now = int(time.time()) livestate = cherrypy.request.json.get('livestate', None) if not livestate: # Create an host live state command livestate = {'state': "UP"} if not isinstance(livestate, list): livestate = [livestate] last_ts = 0 for ls in livestate: if ls.get('state', None) is None: ws_result['_issues'].append("Missing state for the host '%s' livestate, " "assuming host is UP!" 
% host_name) ls['state'] = 'UP' # Tag our own timestamp ls['_ws_timestamp'] = now try: timestamp = int(ls.get('timestamp', 'ABC')) if timestamp < last_ts: logger.info("Got unordered timestamp for the host '%s'. " "The Alignak scheduler may not handle the check result!", host_name) last_ts = timestamp except ValueError: pass for ls in livestate: state = ls.get('state').upper() if state not in ['UP', 'DOWN', 'UNREACHABLE']: ws_result['_issues'].append("Host state should be UP, DOWN or UNREACHABLE" ", and not '%s'." % (state)) else: # Create an host live state command command = self._build_host_livestate(host_name, ls) ws_result['_result'].append("Raised: %s" % command) # Notify the external command to our Arbiter daemon self.app.add(ExternalCommand(command)) services = cherrypy.request.json.get('services', None) if not services: return ws_result for service in services: service_name = service.get('name', None) if service_name is None: ws_result['_issues'].append( "A service does not have a 'name' property") continue livestate = service.get('livestate', None) if not livestate: # Create a service live state command livestate = {'state': "OK"} if not isinstance(livestate, list): livestate = [livestate] last_ts = 0 for ls in livestate: if ls.get('state', None) is None: ws_result['_issues'].append("Missing state for the service %s/%s livestate, " "assuming service is OK!" % (host_name, service_name)) ls['state'] = 'OK' # Tag our own timestamp ls['_ws_timestamp'] = now try: timestamp = int(ls.get('timestamp', 'ABC')) if timestamp < last_ts: logger.info("Got unordered timestamp for the service: %s/%s. " "The Alignak scheduler may not handle the check result!", host_name, service_name) last_ts = timestamp except ValueError: pass for ls in livestate: state = ls.get('state').upper() if state not in ['OK', 'WARNING', 'CRITICAL', 'UNKNOWN', 'UNREACHABLE']: ws_result['_issues'].append("Service %s/%s state must be OK, WARNING, " "CRITICAL, UNKNOWN or UNREACHABLE, and not %s." 
% (host_name, service_name, state)) else: # Create a service live state command command = self._build_service_livestate( host_name, service_name, ls) ws_result['_result'].append("Raised: %s" % command) # Notify the external command to our Arbiter daemon self.app.add(ExternalCommand(command)) return ws_result ##### # ___ _ _ _ # |_ _| _ __ | |_ ___ _ __ _ __ __ _ | | ___ _ __ | | _ _ # | | | '_ \ | __| / _ \ | '__| | '_ \ / _` | | | / _ \ | '_ \ | | | | | | # | | | | | | | |_ | __/ | | | | | | | (_| | | | | (_) | | | | | | | | |_| | # |___| |_| |_| \__| \___| |_| |_| |_| \__,_| |_| \___/ |_| |_| |_| \__, | # |___/ ##### @cherrypy.expose @cherrypy.tools.json_out() def _wait_new_conf(self): """Ask the daemon to drop its configuration and wait for a new one This overrides the default method from GenericInterface :return: None """ with self.app.conf_lock: logger.warning( "My master Arbiter wants me to wait for a new configuration.") self.app.cur_conf = {} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_configuration(self, pushed_configuration=None): """Send a new configuration to the daemon This overrides the default method from GenericInterface Used by the master arbiter to send its configuration to a spare arbiter This function is not intended for external use. It is quite complex to build a configuration for a daemon and it is the arbter dispatcher job ;) :param pushed_configuration: new conf to send :return: None """ pushed_configuration = cherrypy.request.json self.app.must_run = False return super(ArbiterInterface, self)._push_configuration( pushed_configuration=pushed_configuration['conf']) @cherrypy.expose @cherrypy.tools.json_out() def _do_not_run(self): """The master arbiter tells to its spare arbiters to not run. 
A master arbiter will ignore this request and it will return an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error :return: None """ # If I'm the master, ignore the command and raise a log if self.app.is_master: message = "Received message to not run. " \ "I am the Master arbiter, ignore and continue to run." logger.warning(message) return {'_status': u'ERR', '_message': message} # Else, I'm just a spare, so I listen to my master logger.debug("Received message to not run. I am the spare, stopping.") self.app.last_master_speak = time.time() self.app.must_run = False return {'_status': u'OK', '_message': message} @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_external_command(self, command=None): """Only to maintain ascending compatibility... this function uses the inner *command* endpoint. :param command: Alignak external command :type command: string :return: None """ return self.command(command=command)
class ArbiterInterface(GenericInterface): '''This module provide a specific HTTP interface for an Arbiter daemon.''' @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def reload_configuration(self): '''Ask to the arbiter to reload the monitored configuration **Note** tha the arbiter will not reload its main configuration file (eg. alignak.ini) but it will reload the monitored objects from the Nagios legacy files or from the Alignak backend! In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error :return: True if configuration reload is accepted ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def backend_notification(self, event=None, parameters=None): '''The Alignak backend raises an event to the Alignak arbiter ----- Possible events are: - creation, for a realm or an host creation - deletion, for a realm or an host deletion Calls the reload configuration function if event is creation or deletion Else, nothing for the moment! In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error The `_status` field is 'OK' with an according `_message` to explain what the Arbiter will do depending upon the notification. :return: dict ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def command(self, command=None, timestamp=None, element=None, host=None, service=None, user=None, parameters=None): ''' Request to execute an external command Allowed parameters are: `command`: mandatory parameter containing the whole command line or only the command name `timestamp`: optional parameter containing the timestamp. If not present, the current timestamp is added in the command line `element`: the targeted element that will be appended after the command name (`command`). 
If element contains a '/' character it is split to make an host and service. `host`, `service` or `user`: the targeted host, service or user. Takes precedence over the `element` to target a specific element `parameters`: the parameter that will be appended after all the arguments When using this endpoint with the HTTP GET method, the semi colons that are commonly used to separate the parameters must be replace with %3B! This because the ; is an accepted URL query parameters separator... Indeed, the recommended way of using this endpoint is to use the HTTP POST method. In case of any error, this function returns an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error The `_status` field is 'OK' with an according `_message` to explain what the Arbiter will do depending upon the notification. The `command` property contains the formatted external command. :return: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def problems(self): '''Alias for monitoring_problems''' pass @cherrypy.expose @cherrypy.tools.json_out() def monitoring_problems(self): '''Get Alignak detailed monitoring status This will return an object containing the properties of the `identity`, plus a `problems` object which contains 2 properties for each known scheduler: - _freshness, which is the timestamp when the provided data were fetched - problems, which is an object with the scheduler known problems: { ... 
"problems": { "scheduler-master": { "_freshness": 1528903945, "problems": { "fdfc986d-4ab4-4562-9d2f-4346832745e6": { "last_state": "CRITICAL", "service": "dummy_critical", "last_state_type": "SOFT", "last_state_update": 1528902442, "last_hard_state": "CRITICAL", "last_hard_state_change": 1528902442, "last_state_change": 1528902381, "state": "CRITICAL", "state_type": "HARD", "host": "host-all-8", "output": "Hi, checking host-all-8/dummy_critical -> exit=2" }, "2445f2a3-2a3b-4b13-96ed-4cfb60790e7e": { "last_state": "WARNING", "service": "dummy_warning", "last_state_type": "SOFT", "last_state_update": 1528902463, "last_hard_state": "WARNING", "last_hard_state_change": 1528902463, "last_state_change": 1528902400, "state": "WARNING", "state_type": "HARD", "host": "host-all-6", "output": "Hi, checking host-all-6/dummy_warning -> exit=1" }, ... } } } } :return: schedulers live synthesis list :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def livesynthesis(self): '''Get Alignak live synthesis This will return an object containing the properties of the `identity`, plus a `livesynthesis` object which contains 2 properties for each known scheduler: - _freshness, which is the timestamp when the provided data were fetched - livesynthesis, which is an object with the scheduler live synthesis. An `_overall` fake scheduler is also contained in the schedulers list to provide the cumulated live synthesis. Before sending the results, the arbiter sums-up all its schedulers live synthesis counters in the `_overall` live synthesis. { ... 
"livesynthesis": { "_overall": { "_freshness": 1528947526, "livesynthesis": { "hosts_total": 11, "hosts_not_monitored": 0, "hosts_up_hard": 11, "hosts_up_soft": 0, "hosts_down_hard": 0, "hosts_down_soft": 0, "hosts_unreachable_hard": 0, "hosts_unreachable_soft": 0, "hosts_flapping": 0, "hosts_problems": 0, "hosts_acknowledged": 0, "hosts_in_downtime": 0, "services_total": 100, "services_not_monitored": 0, "services_ok_hard": 70, "services_ok_soft": 0, "services_warning_hard": 4, "services_warning_soft": 6, "services_critical_hard": 6, "services_critical_soft": 4, "services_unknown_hard": 3, "services_unknown_soft": 7, "services_unreachable_hard": 0, "services_unreachable_soft": 0, "services_flapping": 0, "services_problems": 0, "services_acknowledged": 0, "services_in_downtime": 0 } } }, "scheduler-master": { "_freshness": 1528947522, "livesynthesis": { "hosts_total": 11, "hosts_not_monitored": 0, "hosts_up_hard": 11, "hosts_up_soft": 0, "hosts_down_hard": 0, "hosts_down_soft": 0, "hosts_unreachable_hard": 0, "hosts_unreachable_soft": 0, "hosts_flapping": 0, "hosts_problems": 0, "hosts_acknowledged": 0, "hosts_in_downtime": 0, "services_total": 100, "services_not_monitored": 0, "services_ok_hard": 70, "services_ok_soft": 0, "services_warning_hard": 4, "services_warning_soft": 6, "services_critical_hard": 6, "services_critical_soft": 4, "services_unknown_hard": 3, "services_unknown_soft": 7, "services_unreachable_hard": 0, "services_unreachable_soft": 0, "services_flapping": 0, "services_problems": 0, "services_acknowledged": 0, "services_in_downtime": 0 } } } } } :return: scheduler live synthesis :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def object(self, o_type, o_name=None): '''Get a monitored object from the arbiter. Indeed, the arbiter requires the object from its schedulers. It will iterate in its schedulers list until a matching object is found. 
Else it will return a Json structure containing _status and _message properties. When found, the result is a serialized object which is a Json structure containing: - content: the serialized object content - __sys_python_module__: the python class of the returned object The Alignak unserialize function of the alignak.misc.serialization package allows to restore the initial object. .. code-block:: python from alignak.misc.serialization import unserialize from alignak.objects.hostgroup import Hostgroup raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts") print("Got: %s / %s" % (raw_data.status_code, raw_data.content)) assert raw_data.status_code == 200 object = raw_data.json() group = unserialize(object, True) assert group.__class__ == Hostgroup assert group.get_name() == 'allhosts' As an example: { "__sys_python_module__": "alignak.objects.hostgroup.Hostgroup", "content": { "uuid": "32248642-97dd-4f39-aaa2-5120112a765d", "name": "", "hostgroup_name": "allhosts", "use": [], "tags": [], "alias": "All Hosts", "notes": "", "definition_order": 100, "register": true, "unknown_members": [], "notes_url": "", "action_url": "", "imported_from": "unknown", "conf_is_correct": true, "configuration_errors": [], "configuration_warnings": [], "realm": "", "downtimes": {}, "hostgroup_members": [], "members": [ "553d47bc-27aa-426c-a664-49c4c0c4a249", "f88093ca-e61b-43ff-a41e-613f7ad2cea2", "df1e2e13-552d-43de-ad2a-fe80ad4ba979", "d3d667dd-f583-4668-9f44-22ef3dcb53ad" ] } } :param o_type: searched object type :type o_type: str :param o_name: searched object name (or uuid) :type o_name: str :return: serialized object information :rtype: str ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def dump(self, o_name=None, details=False, raw=False): '''Dump an host (all hosts) from the arbiter. The arbiter will get the host (all hosts) information from all its schedulers. This gets the main host information from the scheduler. 
If details is set, then some more information are provided. This will not get all the host known attributes but only a reduced set that will inform about the host and its services status If raw is set the information are provided in two string lists formated as CSV strings. The first list element contains the hosts information and the second one contains the services information. If an host name is provided, this function will get only this host information, else all the scheduler hosts are returned. As an example (in raw format): { scheduler-master-3: [ [ "type;host;name;last_check;state_id;state;state_type;is_problem; is_impact;output", "localhost;host;localhost;1532451740;0;UP;HARD;False;False; Host assumed to be UP", "host_2;host;host_2;1532451988;1;DOWN;HARD;True;False;I am always Down" ], [ "type;host;name", "host_2;service;dummy_no_output;1532451981;0;OK;HARD;False;True; Service internal check result: 0", "host_2;service;dummy_warning;1532451960;4;UNREACHABLE;HARD;False;True; host_2-dummy_warning-1", "host_2;service;dummy_unreachable;1532451987;4;UNREACHABLE;HARD;False;True; host_2-dummy_unreachable-4", "host_2;service;dummy_random;1532451949;4;UNREACHABLE;HARD;False;True; Service internal check result: 2", "host_2;service;dummy_ok;1532452002;0;OK;HARD;False;True;host_2", "host_2;service;dummy_critical;1532451953;4;UNREACHABLE;HARD;False;True; host_2-dummy_critical-2", "host_2;service;dummy_unknown;1532451945;4;UNREACHABLE;HARD;False;True; host_2-dummy_unknown-3", "host_2;service;dummy_echo;1532451973;4;UNREACHABLE;HARD;False;True;" ] ], scheduler-master-2: [ [ "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output", "host_0;host;host_0;1532451993;0;UP;HARD;False;False;I am always Up", "BR_host;host;BR_host;1532451991;0;UP;HARD;False;False;Host assumed to be UP" ], [ "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output", "host_0;service;dummy_no_output;1532451970;0;OK;HARD;False;False; Service internal 
check result: 0", "host_0;service;dummy_unknown;1532451964;3;UNKNOWN;HARD;True;False; host_0-dummy_unknown-3", "host_0;service;dummy_random;1532451991;1;WARNING;HARD;True;False; Service internal check result: 1", "host_0;service;dummy_warning;1532451945;1;WARNING;HARD;True;False; host_0-dummy_warning-1", "host_0;service;dummy_unreachable;1532451986;4;UNREACHABLE;HARD;True;False; host_0-dummy_unreachable-4", "host_0;service;dummy_ok;1532452012;0;OK;HARD;False;False;host_0", "host_0;service;dummy_critical;1532451987;2;CRITICAL;HARD;True;False; host_0-dummy_critical-2", "host_0;service;dummy_echo;1532451963;0;OK;HARD;False;False;", "BR_host;service;dummy_critical;1532451970;2;CRITICAL;HARD;True;False; BR_host-dummy_critical-2", "BR_host;service;BR_Simple_And;1532451895;1;WARNING;HARD;True;True;", "BR_host;service;dummy_unreachable;1532451981;4;UNREACHABLE;HARD;True;False; BR_host-dummy_unreachable-4", "BR_host;service;dummy_no_output;1532451975;0;OK;HARD;False;False; Service internal check result: 0", "BR_host;service;dummy_unknown;1532451955;3;UNKNOWN;HARD;True;False; BR_host-dummy_unknown-3", "BR_host;service;dummy_echo;1532451981;0;OK;HARD;False;False;", "BR_host;service;dummy_warning;1532451972;1;WARNING;HARD;True;False; BR_host-dummy_warning-1", "BR_host;service;dummy_random;1532451976;4;UNREACHABLE;HARD;True;False; Service internal check result: 4", "BR_host;service;dummy_ok;1532451972;0;OK;HARD;False;False;BR_host" ] ], ... More information are available in the scheduler corresponding API endpoint. :param o_type: searched object type :type o_type: str :param o_name: searched object name (or uuid) :type o_name: str :return: serialized object information :rtype: str ''' pass @cherrypy.expose @cherrypy.tools.json_out() def status(self, details=False): '''Get the overall alignak status Returns a list of the satellites as in: { services: [ { livestate: { perf_data: "", timestamp: 1532106561, state: "ok", long_output: "", output: "all daemons are up and running." 
}, name: "arbiter-master" }, { livestate: { name: "poller_poller-master", timestamp: 1532106561, long_output: "Realm: (True). Listening on: http://127.0.0.1:7771/", state: "ok", output: "daemon is alive and reachable.", perf_data: "last_check=1532106560.17" }, name: "poller-master" }, ... ... ], variables: { }, livestate: { timestamp: 1532106561, long_output: "broker-master - daemon is alive and reachable. poller-master - daemon is alive and reachable. reactionner-master - daemon is alive and reachable. receiver-master - daemon is alive and reachable. receiver-nsca - daemon is alive and reachable. scheduler-master - daemon is alive and reachable. scheduler-master-2 - daemon is alive and reachable. scheduler-master-3 - daemon is alive and reachable.", state: "up", output: "All my daemons are up and running.", perf_data: " 'servicesextinfo'=0 'businessimpactmodulations'=0 'hostgroups'=2 'resultmodulations'=0 'escalations'=0 'schedulers'=3 'hostsextinfo'=0 'contacts'=2 'servicedependencies'=0 'servicegroups'=1 'pollers'=1 'arbiters'=1 'receivers'=2 'macromodulations'=0 'reactionners'=1 'contactgroups'=2 'brokers'=1 'realms'=3 'services'=32 'commands'=11 'notificationways'=2 'timeperiods'=4 'modules'=0 'checkmodulations'=0 'hosts'=6 'hostdependencies'=0" }, name: "My Alignak", template: { notes: "", alias: "My Alignak", _templates: [ "alignak", "important" ], active_checks_enabled: false, passive_checks_enabled: true } } :param details: Details are required (different from 0) :type details bool :return: dict with key *daemon_type* and value list of daemon name :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def events_log(self, details=False, count=0, timestamp=0): '''Get the most recent Alignak events If count is specifies it is the maximum number of events to return. If timestamp is specified, events older than this timestamp will not be returned The arbiter maintains a list of the most recent Alignak events. This endpoint provides this list. 
The default format is: [ "2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: guest;host_0;dummy_random;CRITICAL;1; notify-service-by-log;Service internal check result: 2", "2018-07-23 15:14:43 - E - SERVICE NOTIFICATION: admin;host_0;dummy_random;CRITICAL;1; notify-service-by-log;Service internal check result: 2", "2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_critical;CRITICAL;SOFT;1; host_0-dummy_critical-2", "2018-07-23 15:14:42 - E - SERVICE ALERT: host_0;dummy_random;CRITICAL;HARD;2; Service internal check result: 2", "2018-07-23 15:14:42 - I - SERVICE ALERT: host_0;dummy_unknown;UNKNOWN;HARD;2; host_0-dummy_unknown-3" ] If you request on this endpoint with the *details* parameter (whatever its value...), you will get a detailed JSON output: [ { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:35", message: "SERVICE ALERT: host_11;dummy_echo;UNREACHABLE;HARD;2;", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE NOTIFICATION: guest;host_0;dummy_random;OK;0; notify-service-by-log;Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE NOTIFICATION: admin;host_0;dummy_random;OK;0; notify-service-by-log;Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:32", message: "SERVICE ALERT: host_0;dummy_random;OK;HARD;2; Service internal check result: 0", level: "info" }, { timestamp: 1535517701.1817362, date: "2018-07-23 15:16:19", message: "SERVICE ALERT: host_11;dummy_random;OK;HARD;2; Service internal check result: 0", level: "info" } ] In this example, only the 5 most recent events are provided whereas the default value is to provide the 100 last events. This default counter may be changed thanks to the ``events_log_count`` configuration variable or ``ALIGNAK_EVENTS_LOG_COUNT`` environment variable. 
The date format may also be changed thanks to the ``events_date_format`` configuration variable. :return: list of the most recent events :rtype: list ''' pass @cherrypy.expose @cherrypy.tools.json_out() def satellites_list(self, daemon_type=''): '''Get the arbiter satellite names sorted by type Returns a list of the satellites as in: { reactionner: [ "reactionner-master" ], broker: [ "broker-master" ], arbiter: [ "arbiter-master" ], scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ], receiver: [ "receiver-nsca", "receiver-master" ], poller: [ "poller-master" ] } If a specific daemon type is requested, the list is reduced to this unique daemon type: { scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ] } :param daemon_type: daemon type to filter :type daemon_type: str :return: dict with key *daemon_type* and value list of daemon name :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def realms(self, details=False): '''Return the realms / satellites configuration Returns an object containing the hierarchical realms configuration with the main information about each realm: { All: { satellites: { pollers: [ "poller-master" ], reactionners: [ "reactionner-master" ], schedulers: [ "scheduler-master", "scheduler-master-3", "scheduler-master-2" ], brokers: [ "broker-master" ], receivers: [ "receiver-master", "receiver-nsca" ] }, children: { }, name: "All", members: [ "host_1", "host_0", "host_3", "host_2", "host_11", "localhost" ], level: 0 }, North: { ... } } Sub realms defined inside a realm are provided in the `children` property of their parent realm and they contain the same information as their parent.. The `members` realm contain the list of the hosts members of the realm. 
If ``details`` is required, each realm will contain more information about each satellite involved in the realm management: { All: { satellites: { pollers: [ { passive: false, name: "poller-master", livestate_output: "poller/poller-master is up and running.", reachable: true, uri: "http://127.0.0.1:7771/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.593074, type: "poller" } ], reactionners: [ { passive: false, name: "reactionner-master", livestate_output: "reactionner/reactionner-master is up and running.", reachable: true, uri: "http://127.0.0.1:7769/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.587762, type: "reactionner" } ] :return: dict containing realms / satellites :rtype: dict ''' pass def get_realm_info(realm, realms, satellites, details=False): '''Get the realm and its children information :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_out() def system(self, details=False): '''Return the realms / satellites configuration Returns an object containing the hierarchical realms configuration with the main information about each realm: { All: { satellites: { pollers: [ "poller-master" ], reactionners: [ "reactionner-master" ], schedulers: [ "scheduler-master", "scheduler-master-3", "scheduler-master-2" ], brokers: [ "broker-master" ], receivers: [ "receiver-master", "receiver-nsca" ] }, children: { }, name: "All", members: [ "host_1", "host_0", "host_3", "host_2", "host_11", "localhost" ], level: 0 }, North: { ... } } Sub realms defined inside a realm are provided in the `children` property of their parent realm and they contain the same information as their parent.. The `members` realm contain the list of the hosts members of the realm. 
If ``details`` is required, each realm will contain more information about each satellite involved in the realm management: { All: { satellites: { pollers: [ { passive: false, name: "poller-master", livestate_output: "poller/poller-master is up and running.", reachable: true, uri: "http://127.0.0.1:7771/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.593074, type: "poller" } ], reactionners: [ { passive: false, name: "reactionner-master", livestate_output: "reactionner/reactionner-master is up and running.", reachable: true, uri: "http://127.0.0.1:7769/", alive: true, realm_name: "All", manage_sub_realms: true, spare: false, polling_interval: 5, configuration_sent: true, active: true, livestate: 0, max_check_attempts: 3, last_check: 1532242300.587762, type: "reactionner" } ] :return: dict containing realms / satellites :rtype: dict ''' pass def get_realm_info(realm, realms, satellites, details=False): '''Get the realm and its children information :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_out() def satellites_configuration(self): '''Return all the configuration data of satellites :return: dict containing satellites data Output looks like this :: {'arbiter' : [{'property1':'value1' ..}, {'property2', 'value11' ..}, ..], 'scheduler': [..], 'poller': [..], 'reactionner': [..], 'receiver': [..], 'broker: [..]' } :rtype: dict ''' pass @cherrypy.expose @cherrypy.tools.json_out() def external_commands(self): '''Get the external commands from the daemon Use a lock for this function to protect :return: serialized external command list :rtype: str ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def search(self): ''' Request available queries Posted data: {u'target': u''} Return the list of available target queries :return: See upper comment :rtype: list ''' pass @cherrypy.expose 
@cherrypy.tools.json_in() @cherrypy.tools.json_out() def query(self): ''' Request object passed to datasource.query function: { 'timezone': 'browser', 'panelId': 38, 'range': { 'from': '2018-08-29T02:38:09.633Z', 'to': '2018-08-29T03:38:09.633Z', 'raw': {'from': 'now-1h', 'to': 'now'} }, 'rangeRaw': {'from': 'now-1h', 'to': 'now'}, 'interval': '10s', 'intervalMs': 10000, 'targets': [ { 'target': 'problems', 'refId': 'A', 'type': 'table'} ], 'format': 'json', 'maxDataPoints': 314, 'scopedVars': { '__interval': {'text': '10s', 'value': '10s'}, '__interval_ms': {'text': 10000, 'value': 10000} } } Only the first target is considered. If several targets are required, an error is raised. The target is a string that is searched in the target_queries dictionary. If found the corresponding query is executed and the result is returned. Table response from datasource.query. An array of: [ { "type": "table", "columns": [ { "text": "Time", "type": "time", "sort": true, "desc": true, }, { "text": "mean", }, { "text": "sum", } ], "rows": [ [ 1457425380000, null, null ], [ 1457425370000, 1002.76215352, 1002.76215352 ], ] } ] :return: See upper comment :rtype: list ''' pass def _build_host_livestate(self, host_name, livestate): '''Build and notify the external command for an host livestate PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output> :param host_name: the concerned host name :param livestate: livestate dictionary :return: external command line ''' pass def _build_service_livestate(self, host_name, service_name, livestate): '''Build and notify the external command for a service livestate PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output> Create and post a logcheckresult to the backend for the livestate :param host_name: the concerned host name :param service_name: the concerned service name :param livestate: livestate dictionary :return: external command line ''' pass @cherrypy.expose @cherrypy.tools.json_in() 
@cherrypy.tools.json_out() def host(self): '''Get a passive checks for an host and its services This function builds the external commands corresponding to the host and services provided information :param host_name: host name :param data: dictionary of the host properties to be modified :return: command line ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _wait_new_conf(self): '''Ask the daemon to drop its configuration and wait for a new one This overrides the default method from GenericInterface :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_configuration(self, pushed_configuration=None): '''Send a new configuration to the daemon This overrides the default method from GenericInterface Used by the master arbiter to send its configuration to a spare arbiter This function is not intended for external use. It is quite complex to build a configuration for a daemon and it is the arbter dispatcher job ;) :param pushed_configuration: new conf to send :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_out() def _do_not_run(self): '''The master arbiter tells to its spare arbiters to not run. A master arbiter will ignore this request and it will return an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error :return: None ''' pass @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_external_command(self, command=None): '''Only to maintain ascending compatibility... this function uses the inner *command* endpoint. :param command: Alignak external command :type command: string :return: None ''' pass
81
27
62
7
23
33
5
1.46
1
13
1
0
24
0
24
41
1,661
193
598
139
516
872
405
114
378
24
2
5
136
3,950
Alignak-monitoring/alignak
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Alignak-monitoring_alignak/alignak/http/broker_interface.py
alignak.http.broker_interface.BrokerInterface
class BrokerInterface(GenericInterface): """This class provides specific HTTP functions for the Broker daemons.""" ##### # ___ _ _ _ # |_ _| _ __ | |_ ___ _ __ _ __ __ _ | | ___ _ __ | | _ _ # | | | '_ \ | __| / _ \ | '__| | '_ \ / _` | | | / _ \ | '_ \ | | | | | | # | | | | | | | |_ | __/ | | | | | | | (_| | | | | (_) | | | | | | | | |_| | # |___| |_| |_| \__| \___| |_| |_| |_| \__,_| |_| \___/ |_| |_| |_| \__, | # |___/ ##### @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_broks(self): """Push the provided broks objects to the broker daemon Only used on a Broker daemon by the Arbiter :param: broks :type: list :return: None """ data = cherrypy.request.json with self.app.arbiter_broks_lock: logger.debug("Pushing %d broks", len(data['broks'])) self.app.arbiter_broks.extend( [unserialize(elem, True) for elem in data['broks']])
class BrokerInterface(GenericInterface): '''This class provides specific HTTP functions for the Broker daemons.''' @cherrypy.expose @cherrypy.tools.json_in() @cherrypy.tools.json_out() def _push_broks(self): '''Push the provided broks objects to the broker daemon Only used on a Broker daemon by the Arbiter :param: broks :type: list :return: None ''' pass
5
2
13
2
5
6
1
1.67
1
0
0
0
1
0
1
18
28
4
9
4
4
15
6
3
4
1
2
1
1
3,951
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_daemon_start.py
tests.test_daemon_start.Test_Receiver_Start
class Test_Receiver_Start(TemplateDaemonStart, AlignakTest): def setUp(self): super(Test_Receiver_Start, self).setUp() daemon_cls = Receiver daemon_name = 'my_receiver'
class Test_Receiver_Start(TemplateDaemonStart, AlignakTest): def setUp(self): pass
2
0
2
0
2
0
1
0
2
1
0
0
1
0
1
73
6
1
5
4
3
0
5
4
3
1
2
0
1
3,952
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/http/client.py
alignak.http.client.HTTPClientException
class HTTPClientException(Exception): """Simple HTTP Exception - raised for all requests exception except for a timeout""" pass
class HTTPClientException(Exception): '''Simple HTTP Exception - raised for all requests exception except for a timeout''' pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
10
3
0
2
1
1
1
2
1
1
0
3
0
0
3,953
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_daemon_start.py
tests.test_daemon_start.Test_Scheduler_Start
class Test_Scheduler_Start(TemplateDaemonStart, AlignakTest): def setUp(self): super(Test_Scheduler_Start, self).setUp() daemon_cls = Alignak daemon_name = 'my_scheduler'
class Test_Scheduler_Start(TemplateDaemonStart, AlignakTest): def setUp(self): pass
2
0
2
0
2
0
1
0
2
1
0
0
1
0
1
73
6
1
5
4
3
0
5
4
3
1
2
0
1
3,954
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/http/client.py
alignak.http.client.HTTPClientTimeoutException
class HTTPClientTimeoutException(Exception): # pragma: no cover, not with unit tests """HTTP Timeout Exception - raised when no response issued by the server in the specified time frame. This specific exception is raised when a requests Timeout exception is catched. Its attribute are: - uri: the requested URI, - timeout: the duration of the timeout. """ def __init__(self, timeout, uri): # Call the base class constructor with the parameters it needs super(HTTPClientTimeoutException, self).__init__() self.timeout = timeout self.uri = uri def __str__(self): # pragma: no cover """Exception to String""" return "Request timeout (%d seconds) for %s" % (self.timeout, self.uri)
class HTTPClientTimeoutException(Exception): '''HTTP Timeout Exception - raised when no response issued by the server in the specified time frame. This specific exception is raised when a requests Timeout exception is catched. Its attribute are: - uri: the requested URI, - timeout: the duration of the timeout. ''' def __init__(self, timeout, uri): pass def __str__(self): '''Exception to String''' pass
3
2
5
1
3
2
1
1.57
1
1
0
0
2
2
2
12
19
3
7
5
4
11
7
5
4
1
3
0
2
3,955
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/http/daemon.py
alignak.http.daemon.HTTPDaemon
class HTTPDaemon(object): """HTTP Server class. Mostly based on Cherrypy It uses CherryPyWSGIServer and daemon http_interface as Application """ # pylint: disable=too-many-arguments, unused-argument def __init__(self, host, port, http_interface, use_ssl, ca_cert, ssl_key, ssl_cert, server_dh, thread_pool_size, log_file=None, icon_file=None): """ Initialize HTTP daemon :param host: host address :param port: listening port :param http_interface: :param use_ssl: :param ca_cert: :param ssl_key: :param ssl_cert: :param thread_pool_size: :param log_file: if set, the log file for Cherrypy log :param icon_file: if set, the favicon file to use """ self.port = port self.host = host self.use_ssl = use_ssl self.uri = '%s://%s:%s' % ('https' if self.use_ssl else 'http', self.host, self.port) logger.debug("Configured HTTP server on %s, %d threads", self.uri, thread_pool_size) # This application config overrides the default processors # so we put them back in case we need them config = { '/': { 'request.body.processors': {'application/x-www-form-urlencoded': process_urlencoded, 'multipart/form-data': process_multipart_form_data, 'multipart': process_multipart, 'application/zlib': zlib_processor}, 'tools.gzip.on': True, 'tools.gzip.mime_types': ['text/*', 'application/json'], 'tools.response_headers.on': True, 'tools.response_headers.headers': [('Access-Control-Allow-Origin', '*')], 'tools.staticfile.on': True if icon_file else False, 'tools.staticfile.filename': icon_file } } # For embedding into a WSGI server # cherrypy.config.update({'environment': 'embedded'}) # Configure HTTP server # Available parameters (see https://github.com/cherrypy/cherrypy/ # blob/master/cherrypy/_cpserver.py) for more information if needed. 
# - socket_queue_size cherrypy.config.update({'engine.autoreload.on': False, 'server.thread_pool': thread_pool_size, 'server.socket_host': str(self.host), 'server.socket_port': self.port}) # Default is to disable CherryPy logging cherrypy.config.update({'log.screen': False, 'log.access_file': '', 'log.error_file': ''}) if log_file: # Log into the provided log file cherrypy.config.update({'log.screen': True, 'log.access_file': str(log_file), 'log.error_file': str(log_file)}) cherrypy.log.access_log.setLevel(logging.DEBUG) cherrypy.log.error_log.setLevel(logging.DEBUG) cherrypy.log("CherryPy logging: %s" % (log_file)) if use_ssl: # Configure SSL server certificate and private key # Parameters: # ssl_context = None # When using PyOpenSSL, an instance of SSL.Context. # ssl_certificate = None # The filename of the SSL certificate to use. # ssl_certificate_chain = None # When using PyOpenSSL, the certificate chain to pass to # Context.load_verify_locations. # ssl_private_key = None # The filename of the private key to use with SSL. # ssl_ciphers = None # The ciphers list of SSL. cherrypy.config.update({'server.ssl_certificate': ssl_cert, 'server.ssl_private_key': ssl_key}) cherrypy.log("Using PyOpenSSL: %s" % (PYOPENSSL)) if not PYOPENSSL: # Use CherryPy built-in module if PyOpenSSL is not installed cherrypy.config.update({'server.ssl_module': 'builtin'}) cherrypy.log("Using SSL certificate: %s" % (ssl_cert)) cherrypy.log("Using SSL private key: %s" % (ssl_key)) if ca_cert: cherrypy.config.update({'server.ssl_certificate_chain': ca_cert}) cherrypy.log("Using SSL CA certificate: %s" % ca_cert) # Mount the main application (an Alignak daemon interface) cherrypy.tree.mount(http_interface, '/', config) def run(self): """Wrapper to start the CherryPy server This function throws a PortNotFree exception if any socket error is raised. 
:return: None """ def _started_callback(): """Callback function when Cherrypy Engine is started""" cherrypy.log("CherryPy engine started and listening...") self.cherrypy_thread = None try: cherrypy.log("Starting CherryPy engine on %s" % (self.uri)) self.cherrypy_thread = cherrypy.engine.start_with_callback(_started_callback) cherrypy.engine.block() cherrypy.log("Exited from the engine block") except socket.error as exp: raise PortNotFree("Error: Sorry, the HTTP server did not started correctly: error: %s" % (str(exp))) def stop(self): # pylint: disable=no-self-use """Wrapper to stop the CherryPy server :return: None """ cherrypy.log("Stopping CherryPy engine (current state: %s)..." % cherrypy.engine.state) try: cherrypy.engine.exit() except RuntimeWarning: pass except SystemExit: cherrypy.log('SystemExit raised: shutting down bus') cherrypy.log("Stopped")
class HTTPDaemon(object): '''HTTP Server class. Mostly based on Cherrypy It uses CherryPyWSGIServer and daemon http_interface as Application ''' def __init__(self, host, port, http_interface, use_ssl, ca_cert, ssl_key, ssl_cert, server_dh, thread_pool_size, log_file=None, icon_file=None): ''' Initialize HTTP daemon :param host: host address :param port: listening port :param http_interface: :param use_ssl: :param ca_cert: :param ssl_key: :param ssl_cert: :param thread_pool_size: :param log_file: if set, the log file for Cherrypy log :param icon_file: if set, the favicon file to use ''' pass def run(self): '''Wrapper to start the CherryPy server This function throws a PortNotFree exception if any socket error is raised. :return: None ''' pass def _started_callback(): '''Callback function when Cherrypy Engine is started''' pass def stop(self): '''Wrapper to stop the CherryPy server :return: None ''' pass
5
5
33
3
18
12
3
0.74
1
4
1
0
3
5
3
3
134
15
69
13
63
51
46
11
41
7
1
2
13
3,956
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/check.py
alignak.check.Check
class Check(Action): # pylint: disable=too-many-instance-attributes """Check class implements monitoring concepts of checks :(status, state, output) Check instance are used to store monitoring plugins data (exit status, output) and used by schedule to raise alert, reschedule check etc. """ # AutoSlots create the __slots__ with properties and # running_properties names # FIXME : re-enable AutoSlots if possible # __metaclass__ = AutoSlots my_type = 'check' properties = Action.properties.copy() properties.update({ 'is_a': StringProp(default=u'check'), 'state': IntegerProp(default=0), 'depend_on': ListProp(default=[]), 'depend_on_me': ListProp(default=[], split_on_comma=False), 'passive_check': BoolProp(default=False), 'freshness_expiry_check': BoolProp(default=False), 'poller_tag': StringProp(default=u'None'), 'dependency_check': BoolProp(default=False), }) def __init__(self, params=None, parsing=False): super(Check, self).__init__(params, parsing=parsing) if self.command.startswith('_'): self.internal = True def __str__(self): # pragma: no cover return "Check %s %s, item: %s, status: %s, command:'%s'" % \ (self.uuid, "active" if not self.passive_check else "passive", self.ref, self.status, self.command) def get_return_from(self, check): """Update check data from action (notification for instance) :param check: action to get data from :type check: alignak.action.Action :return: None """ for prop in ['exit_status', 'output', 'long_output', 'check_time', 'execution_time', 'perf_data', 'u_time', 's_time']: setattr(self, prop, getattr(check, prop)) def set_type_active(self): """Set this check as an active one (indeed, not passive) :return: None """ self.passive_check = False def set_type_passive(self): """Set this check as a passive one :return: None """ self.passive_check = True def is_dependent(self): """Getter for dependency_check attribute :return: True if this check was created for a dependent one, False otherwise :rtype: bool """ return self.dependency_check def 
serialize(self, no_json=True, printing=False): """This function serializes into a simple dict object. The only usage is to send to poller, and it does not need to have the depend_on and depend_on_me properties. :return: json representation of a Check :rtype: dict """ res = super(Check, self).serialize(no_json=no_json, printing=printing) if 'depend_on' in res: del res['depend_on'] if 'depend_on_me' in res: del res['depend_on_me'] return res
class Check(Action): '''Check class implements monitoring concepts of checks :(status, state, output) Check instance are used to store monitoring plugins data (exit status, output) and used by schedule to raise alert, reschedule check etc. ''' def __init__(self, params=None, parsing=False): pass def __str__(self): pass def get_return_from(self, check): '''Update check data from action (notification for instance) :param check: action to get data from :type check: alignak.action.Action :return: None ''' pass def set_type_active(self): '''Set this check as an active one (indeed, not passive) :return: None ''' pass def set_type_passive(self): '''Set this check as a passive one :return: None ''' pass def is_dependent(self): '''Getter for dependency_check attribute :return: True if this check was created for a dependent one, False otherwise :rtype: bool ''' pass def serialize(self, no_json=True, printing=False): '''This function serializes into a simple dict object. The only usage is to send to poller, and it does not need to have the depend_on and depend_on_me properties. :return: json representation of a Check :rtype: dict ''' pass
8
6
8
1
4
3
2
0.67
1
1
0
0
7
2
7
22
93
18
46
14
38
31
26
14
18
3
4
1
12
3,957
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestStringProp
class TestStringProp(PropertyTests, AlignakTest): """Test the StringProp class""" prop_class = alignak.property.StringProp def test_pythonize(self): p = self.prop_class() assert p.pythonize("1") == "1" assert p.pythonize("yes") == "yes" assert p.pythonize("0") == "0" assert p.pythonize("no") == "no" assert p.pythonize(["yes", "no"]) == "no"
class TestStringProp(PropertyTests, AlignakTest): '''Test the StringProp class''' def test_pythonize(self): pass
2
1
7
0
7
0
1
0.11
2
0
0
2
1
0
1
60
12
2
9
4
7
1
9
4
7
1
2
0
1
3,958
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_downtimes.py
tests.test_downtimes.TestDowntime
class TestDowntime(AlignakTest): """ This class tests the downtimes """ def setUp(self): """ For each test load and check the configuration :return: None """ super(TestDowntime, self).setUp() self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) assert self.conf_is_correct # No error messages assert len(self.configuration_errors) == 0 # No warning messages assert len(self.configuration_warnings) == 0 def test_create_downtime(self): """ Create a downtime object """ now = int(time.time()) # With common parameters data = {'ref': 'host.uuid', 'ref_type': 'host.my_type', 'start_time': now, 'end_time': now + 5, 'fixed': True, 'trigger_id': '', 'duration': 0, 'author': 'me', 'comment': 'created by me!'} downtime = Downtime(data) expected = {'uuid': downtime.uuid} expected.update({ # Provided parameters 'ref': 'host.uuid', 'ref_type': 'host.my_type', 'start_time': now, 'end_time': now + 5, 'fixed': True, 'author': 'me', 'comment': 'created by me!', 'trigger_id': '', 'duration': 5.0, # Object created properties 'can_be_deleted': False, 'has_been_triggered': False, 'is_in_effect': False, 'activate_me': [], 'comment_id': '', 'entry_time': downtime.entry_time, 'real_end_time': downtime.end_time, }) assert expected == downtime.__dict__ assert str(downtime) == "inactive fixed Downtime id=%s %s - %s" \ % (downtime.uuid, time.ctime(downtime.start_time), time.ctime(downtime.end_time)) # A serialized downtime is the same as the __dict__ assert downtime.__dict__ == downtime.serialize() # Unserialize the serialized downtime unserialized_item = Downtime(params=downtime.serialize()) assert downtime.__dict__ == unserialized_item.__dict__ def test_schedule_fixed_svc_downtime(self): """ Schedule a fixed downtime for a service """ # Get the service svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults # Not any downtime yet ! 
assert svc.downtimes == {} # Get service scheduled downtime depth assert svc.scheduled_downtime_depth == 0 # No current notifications assert 0 == svc.current_notification_number, 'All OK no notifications' # To make tests quicker we make notifications send very quickly svc.notification_interval = 0.001 svc.event_handler_enabled = False # Clean broks to delete scheduler retention load message self._main_broker.broks = [] # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # Make the service be OK self.scheduler_loop(1, [[svc, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # schedule a 15 minutes downtime now = int(time.time()) duration = 15 * 60 # downtime valid for 15 minutes from now cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%d;%d;1;0;%d;" \ "downtime author;downtime comment" % (now, now, now + duration, duration) self._scheduler.run_external_commands([cmd]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop(1) # A downtime exist for the service assert len(svc.downtimes) == 1 downtime = list(svc.downtimes.values())[0] assert downtime.comment == "downtime comment" assert downtime.author == "downtime author" assert downtime.start_time == now assert downtime.end_time == now + duration assert downtime.duration == duration # Fixed assert downtime.fixed # Already active assert downtime.is_in_effect # Cannot be deleted assert not downtime.can_be_deleted assert downtime.trigger_id == "0" # Get service scheduled downtime depth scheduled_downtime_depth = svc.scheduled_downtime_depth assert svc.scheduled_downtime_depth == 1 assert 0 == 
svc.current_notification_number, 'Should not have any notification' # Notification: downtime start self.assert_actions_count(1) self.show_actions() # 1st notification for downtime start self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 ' '--notificationtype DOWNTIMESTART --servicestate OK ' '--serviceoutput OK', 'command') self.assert_actions_match(0, 'NOTIFICATIONTYPE=DOWNTIMESTART, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=downtime author, ' 'NOTIFICATIONAUTHORNAME=Not available, ' 'NOTIFICATIONAUTHORALIAS=Not available, ' 'NOTIFICATIONCOMMENT=downtime comment, ' 'HOSTNOTIFICATIONNUMBER=0, ' 'SERVICENOTIFICATIONNUMBER=0', 'command') # A comment exist in our service assert 1 == len(svc.comments) # Make the service be OK after a while # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) self.scheduler_loop(1, [[svc, 0, 'OK']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "OK" == svc.state assert 0 == svc.current_notification_number, 'Should not have any notification' # Still only 1 self.assert_actions_count(1) # The downtime still exist in our service assert 1 == len(svc.downtimes) # The service is currently in a downtime period assert svc.in_scheduled_downtime downtime = list(svc.downtimes.values())[0] assert downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # Make the service be CRITICAL/SOFT # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "SOFT" == svc.state_type assert "CRITICAL" == svc.state assert 0 == svc.current_notification_number, 'Should not have any notification' # Still only 1 self.assert_actions_count(1) assert 1 == len(svc.downtimes) # The service is still in a 
downtime period assert svc.in_scheduled_downtime downtime = list(svc.downtimes.values())[0] assert downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # Make the service be CRITICAL/HARD # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) self.scheduler_loop(1, [[svc, 2, 'BAD']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "CRITICAL" == svc.state assert 0 == svc.current_notification_number, 'Should not have any notification' # Now 2 actions because the service is a problem self.assert_actions_count(2) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # The service is now a problem... self.assert_actions_match(1, 'VOID', 'command') self.assert_actions_match(1, 'PROBLEM', 'type') self.assert_actions_match(1, 'scheduled', 'status') self.show_actions() assert 1 == len(svc.downtimes) # The service is still in a downtime period assert svc.in_scheduled_downtime downtime = list(svc.downtimes.values())[0] assert downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # Wait for a while, the service is back to OK but after the downtime expiry time # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=15)) self.scheduler_loop(1, [[svc, 0, 'OK']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "OK" == svc.state # No more downtime for the service nor the scheduler assert 0 == len(svc.downtimes) # The service is not anymore in a scheduled downtime period assert not svc.in_scheduled_downtime assert svc.scheduled_downtime_depth < scheduled_downtime_depth # No more comment for the service assert 0 == len(svc.comments) assert 0 == svc.current_notification_number, 'Should not have any notification' # Only 2 
actions because of the downtime - no notifications raised self.show_actions() self.assert_actions_count(2) # 1st notification for downtime start self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 --notificationtype DOWNTIMESTART --servicestate OK --serviceoutput OK', 'command') self.assert_actions_match(0, 'NOTIFICATIONTYPE=DOWNTIMESTART, NOTIFICATIONRECIPIENTS=test_contact', 'command') # 2nd notification for downtime end self.assert_actions_match(1, 'notifier.pl --hostname test_host_0 --servicedesc test_ok_0 --notificationtype DOWNTIMEEND --servicestate OK --serviceoutput OK', 'command') self.assert_actions_match(1, 'NOTIFICATIONTYPE=DOWNTIMEEND, NOTIFICATIONRECIPIENTS=test_contact', 'command') # Clear actions self.clear_actions() # Make the service be CRITICAL/HARD self.scheduler_loop(2, [[svc, 2, 'BAD']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "CRITICAL" == svc.state # 2 actions because the service is a problem and a notification is raised self.show_actions() self.assert_actions_count(2) # The service is now a problem... # A problem notification is now raised... self.assert_actions_match(0, 'notification', 'is_a') self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'PROBLEM', 'type') self.assert_actions_match(0, 'scheduled', 'status') # VOID notification self.assert_actions_match(1, 'VOID', 'command') self.assert_actions_match(1, 'PROBLEM', 'type') self.assert_actions_match(1, 'scheduled', 'status') # We got 'monitoring_log' broks for logging to the monitoring events.. # no_date to avoid comparing the events timestamp ! 
monitoring_events = self.get_monitoring_events(no_date=True) print("monitoring events: %s" % monitoring_events) expected_logs = [ ('info', u'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;0;OK'), ('info', u'EXTERNAL COMMAND: [1527877861] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' u'1527877861;1527878761;1;0;900;downtime author;downtime comment'), ('info', u'SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STARTED; ' u'Service has entered a period of scheduled downtime'), ('info', u'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;DOWNTIMESTART (OK);0;notify-service;OK'), ('info', u'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;1;OK'), ('error', u'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;CRITICAL'), ('error', u'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;CRITICAL'), ('error', u'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;BAD'), ('error', u'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;BAD'), ('info', u'SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STOPPED; Service has exited from a period of scheduled downtime'), ('info', u'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;OK;2;OK'), ('info', u'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;OK'), ('info', u'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;DOWNTIMEEND (OK);0;notify-service;OK'), ('error', u'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;BAD'), ('error', u'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;BAD'), ('error', u'ACTIVE SERVICE CHECK: test_host_0;test_ok_0;CRITICAL;1;BAD'), ('error', u'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;BAD'), ('error', u'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;CRITICAL;1;notify-service;BAD'), ] self.check_monitoring_events_log(expected_logs) def test_schedule_flexible_svc_downtime(self): """ Schedule a flexible downtime for a service """ # Get the service svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.checks_in_progress = [] 
svc.act_depend_of = [] # no hostchecks on critical checkresults # Not any downtime yet ! assert svc.downtimes == {} # Get service scheduled downtime depth assert svc.scheduled_downtime_depth == 0 # No current notifications assert 0 == svc.current_notification_number, 'All OK no notifications' # To make tests quicker we make notifications send very quickly svc.notification_interval = 0.001 svc.event_handler_enabled = False # Make the service be OK self.scheduler_loop(1, [[svc, 0, 'OK']]) # ---------------------------------------------------------------- # schedule a flexible downtime of 5 seconds for the service # The downtime will start between now and now + 1 hour and it # will be active for 5 seconds # ---------------------------------------------------------------- duration = 5 now = int(time.time()) cmd = "[%lu] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;%d;%d;0;0;%d;" \ "downtime author;downtime comment" % (now, now, now + 3600, duration) self._scheduler.run_external_commands([cmd]) self.external_command_loop() # A downtime exist for the service assert len(svc.downtimes) == 1 downtime = list(svc.downtimes.values())[0] assert downtime.comment == "downtime comment" assert downtime.author == "downtime author" assert downtime.start_time == now assert downtime.end_time == now + 3600 assert downtime.duration == duration # Not fixed assert not downtime.fixed # Not yet active assert not downtime.is_in_effect # Cannot be deleted assert not downtime.can_be_deleted assert downtime.trigger_id == "0" # Get service scheduled downtime depth -> 0 no downtime scheduled_downtime_depth = svc.scheduled_downtime_depth assert svc.scheduled_downtime_depth == 0 assert 0 == svc.current_notification_number, 'Should not have any notification' # No notifications, downtime did not started ! 
self.assert_actions_count(0) # A comment exist in our service assert 1 == len(svc.comments) # ---------------------------------------------------------------- # run the service and return an OK status # check if the downtime is still inactive # ---------------------------------------------------------------- self.scheduler_loop(2, [[svc, 0, 'OK']]) assert "HARD" == svc.state_type assert "OK" == svc.state assert 1 == len(svc.downtimes) assert not svc.in_scheduled_downtime downtime = list(svc.downtimes.values())[0] assert not downtime.fixed assert not downtime.is_in_effect assert not downtime.can_be_deleted # No notifications, downtime did not started ! assert 0 == svc.current_notification_number, 'Should not have any notification' self.assert_actions_count(0) time.sleep(1) # ---------------------------------------------------------------- # run the service to get a soft critical status # check if the downtime is still inactive # ---------------------------------------------------------------- self.scheduler_loop(1, [[svc, 2, 'BAD']]) assert "SOFT" == svc.state_type assert "CRITICAL" == svc.state assert 1 == len(svc.downtimes) downtime = list(svc.downtimes.values())[0] assert not svc.in_scheduled_downtime assert not downtime.fixed assert not downtime.is_in_effect assert not downtime.can_be_deleted # No notifications, downtime did not started ! 
assert 0 == svc.current_notification_number, 'Should not have any notification' self.assert_actions_count(0) time.sleep(1) # ---------------------------------------------------------------- # run the service again to get a hard critical status # check if the downtime is active now # ---------------------------------------------------------------- time.sleep(1.0) self.scheduler_loop(1, [[svc, 2, 'BAD']]) assert "HARD" == svc.state_type assert "CRITICAL" == svc.state time.sleep(1) assert 1 == len(svc.downtimes) downtime = list(svc.downtimes.values())[0] assert svc.in_scheduled_downtime assert not downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # 2 actions because the service is a problem and the downtime started self.assert_actions_count(2) # The downtime started self.assert_actions_match(-1, '/notifier.pl', 'command') self.assert_actions_match(-1, 'DOWNTIMESTART', 'type') self.assert_actions_match(-1, 'scheduled', 'status') # The service is now a problem... 
but no notification self.assert_actions_match(-1, 'VOID', 'command') self.assert_actions_match(-1, 'PROBLEM', 'type') self.assert_actions_match(-1, 'scheduled', 'status') # The downtime is now active assert downtime.is_in_effect # Get service scheduled downtime depth -> 0 no downtime scheduled_downtime_depth = svc.scheduled_downtime_depth assert svc.scheduled_downtime_depth == 1 # Wait for a while, the service recovers time.sleep(1) self.scheduler_loop(1, [[svc, 0, 'OK']]) assert "HARD" == svc.state_type assert "OK" == svc.state # Wait for a while, the service is still CRITICAL but after the downtime expiry time time.sleep(5) self.scheduler_loop(2, [[svc, 2, 'BAD']]) time.sleep(1.0) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "CRITICAL" == svc.state # No more downtime for the service nor the scheduler assert 0 == len(svc.downtimes) # The service is not anymore in a scheduled downtime period assert not svc.in_scheduled_downtime assert svc.scheduled_downtime_depth < scheduled_downtime_depth # No more comment for the service assert 0 == len(svc.comments) # Now 4 actions because the service is no more a problem and the downtime ended self.show_actions() self.assert_actions_count(4) # The downtime started self.assert_actions_match(-1, '/notifier.pl', 'command') self.assert_actions_match(-1, 'DOWNTIMESTART', 'type') self.assert_actions_match(-1, 'scheduled', 'status') # The downtime ended self.assert_actions_match(-1, '/notifier.pl', 'command') self.assert_actions_match(-1, 'DOWNTIMEEND', 'type') self.assert_actions_match(-1, 'scheduled', 'status') # The service is now a problem... with no notification self.assert_actions_match(-1, 'VOID', 'command') self.assert_actions_match(-1, 'PROBLEM', 'type') self.assert_actions_match(-1, 'scheduled', 'status') # The service is now a problem... 
with a notification self.assert_actions_match(-1, '/notifier.pl', 'command') self.assert_actions_match(-1, 'PROBLEM', 'type') self.assert_actions_match(-1, 'scheduled', 'status') # We got 'monitoring_log' broks for logging to the monitoring events.. # no_date to avoid comparing the events timestamp ! monitoring_events = self.get_monitoring_events(no_date=True) print("monitoring events: %s" % monitoring_events) expected_logs = [ ('info', 'EXTERNAL COMMAND: [%s] SCHEDULE_SVC_DOWNTIME;test_host_0;test_ok_0;' '%s;%s;0;0;%s;downtime author;downtime comment' % ( now, now, now + 3600, duration)), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;BAD'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;BAD'), ('info', 'SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STARTED; ' 'Service has entered a period of scheduled downtime'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'DOWNTIMESTART (CRITICAL);0;notify-service;BAD'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;OK'), ('info', 'SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STOPPED; ' 'Service has exited from a period of scheduled downtime'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'DOWNTIMEEND (OK);0;notify-service;OK'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;BAD'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;BAD'), ('error', 'SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;' 'CRITICAL;1;notify-service;BAD') ] for log_level, log_message in expected_logs: assert (log_level, log_message) in monitoring_events def test_schedule_fixed_host_downtime(self): """ Schedule a fixed downtime for an host """ # Get the host host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] assert host.enable_notifications assert host.notifications_enabled assert host.notification_period # Not any downtime yet ! 
assert host.downtimes == {} # Get service scheduled downtime depth assert host.scheduled_downtime_depth == 0 # No current notifications assert 0 == host.current_notification_number, 'All OK no notifications' # To make tests quicker we make notifications send very quickly host.notification_interval = 0.001 host.event_handler_enabled = False # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # Make the host be OK self.scheduler_loop(1, [[host, 0, 'UP']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # schedule a 15 minutes downtime now = int(time.time()) duration = 15 * 60 cmd = "[%lu] SCHEDULE_HOST_DOWNTIME;test_host_0;%d;%d;1;;%d;" \ "downtime author;downtime comment" % (now, now, now + duration, duration) self._scheduler.run_external_commands([cmd]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop(1) # A downtime exist for the host assert len(host.downtimes) == 1 downtime = list(host.downtimes.values())[0] assert downtime.comment == "downtime comment" assert downtime.author == "downtime author" assert downtime.start_time == now assert downtime.end_time == now + duration assert downtime.duration == duration # Fixed assert downtime.fixed # Already active assert downtime.is_in_effect # Cannot be deleted assert not downtime.can_be_deleted assert downtime.trigger_id == "" # Get host scheduled downtime depth scheduled_downtime_depth = host.scheduled_downtime_depth assert host.scheduled_downtime_depth == 1 assert 0 == host.current_notification_number, 'Should not have any notification' # The host is currently in a downtime period assert 
host.in_scheduled_downtime assert downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # Notification: downtime start self.show_actions() self.assert_actions_count(1) self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 ' '--notificationtype DOWNTIMESTART ' '--hoststate UP --hostoutput UP', 'command') self.assert_actions_match(0, 'NOTIFICATIONTYPE=DOWNTIMESTART, ' 'NOTIFICATIONRECIPIENTS=test_contact, ' 'NOTIFICATIONISESCALATED=False, ' 'NOTIFICATIONAUTHOR=downtime author, ' 'NOTIFICATIONAUTHORNAME=Not available, ' 'NOTIFICATIONAUTHORALIAS=Not available, ' 'NOTIFICATIONCOMMENT=downtime comment, ' 'HOSTNOTIFICATIONNUMBER=0, ' 'SERVICENOTIFICATIONNUMBER=0', 'command') # A comment exists in our host assert 1 == len(host.comments) # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Raise an host check self.scheduler_loop(2, [[host, 0, 'UP']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == host.state_type assert "UP" == host.state assert 0 == host.current_notification_number, 'Should not have any notification' # Still only 1 action self.show_actions() self.assert_actions_count(1) # The downtime still exist in our host assert 1 == len(host.downtimes) downtime = list(host.downtimes.values())[0] # The host is currently in a downtime period assert host.in_scheduled_downtime assert downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Make the host be DOWN/SOFT self.scheduler_loop(1, [[host, 2, 'DOWN']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "SOFT" == host.state_type assert "DOWN" == host.state assert 0 == host.current_notification_number, 'Should not have any notification' # Still only 1 action self.show_actions() self.assert_actions_count(1) assert 1 == len(host.downtimes) downtime = 
list(host.downtimes.values())[0] # The host is still in a downtime period assert host.in_scheduled_downtime assert downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Make the host be DOWN/HARD self.scheduler_loop(2, [[host, 2, 'DOWN']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == host.state_type assert "DOWN" == host.state assert 0 == host.current_notification_number, 'Should not have any notification' # Still only 1 action and a master problem notification self.show_actions() self.assert_actions_count(2) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # The host is now a problem... master notification self.assert_actions_match(1, 'VOID', 'command') self.assert_actions_match(1, 'PROBLEM', 'type') self.assert_actions_match(1, 'scheduled', 'status') assert 1 == len(host.downtimes) downtime = list(host.downtimes.values())[0] # The host is still in a downtime period assert host.in_scheduled_downtime assert downtime.fixed assert downtime.is_in_effect assert not downtime.can_be_deleted # Wait for a while, the host will go UP but after the downtime expiry time # Time warp 15 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=15)) self.scheduler_loop(1, [[host, 0, 'UP']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "UP" == host.state self.show_actions() # 2 actions: host downtime start and end. 
self.assert_actions_count(2) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # The downtime ended self.assert_actions_match(1, '/notifier.pl', 'command') self.assert_actions_match(1, 'DOWNTIMEEND', 'type') self.assert_actions_match(1, 'scheduled', 'status') # No more downtime for the host nor the scheduler assert 0 == len(host.downtimes) # No more comment for the host assert 0 == len(host.comments) # The host is not anymore in a scheduled downtime period assert not host.in_scheduled_downtime assert host.scheduled_downtime_depth < scheduled_downtime_depth assert 0 == host.current_notification_number, 'Should not have any notification' # Now 4 actions because the host is no more a problem and the downtime ended # Only 2 notifications because the other ones got removed because of the state changing self.show_actions() self.assert_actions_count(2) # The downtime started self.assert_actions_match(0, 'notifier.pl --hostname test_host_0 --notificationtype DOWNTIMESTART --hoststate UP --hostoutput UP', 'command') # # The downtime ended self.assert_actions_match(1, 'notifier.pl --hostname test_host_0 --notificationtype DOWNTIMEEND --hoststate UP --hostoutput UP', 'command') # Clear actions self.clear_actions() # Make the host be DOWN/HARD self.scheduler_loop(3, [[host, 2, 'DOWN']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == host.state_type assert "DOWN" == host.state # 2 actions because the host is a problem and a notification is raised self.show_actions() self.assert_actions_count(2) # The host is now a problem... # A problem notification is now raised... 
self.assert_actions_match(0, 'notification', 'is_a') self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'PROBLEM', 'type') self.assert_actions_match(0, 'scheduled', 'status') self.assert_actions_match(1, 'VOID', 'command') self.assert_actions_match(1, 'PROBLEM', 'type') self.assert_actions_match(1, 'scheduled', 'status') # We got 'monitoring_log' broks for logging to the monitoring events.. # no_date to avoid comparing the events timestamp ! monitoring_events = self.get_monitoring_events(no_date=True) print("monitoring events: %s" % monitoring_events) expected_logs = [ ('info', 'EXTERNAL COMMAND: [%s] SCHEDULE_HOST_DOWNTIME;test_host_0;%s;%s;1;;%s;' 'downtime author;downtime comment' % ( now, now, now + duration, duration)), ('info', 'HOST DOWNTIME ALERT: test_host_0;STARTED; ' 'Host has entered a period of scheduled downtime'), ('info', 'HOST NOTIFICATION: test_contact;test_host_0;' 'DOWNTIMESTART (UP);0;notify-host;UP'), ('error', 'HOST ALERT: test_host_0;DOWN;SOFT;1;DOWN'), ('error', 'HOST ALERT: test_host_0;DOWN;SOFT;2;DOWN'), ('error', 'HOST ALERT: test_host_0;DOWN;HARD;3;DOWN'), ('info', 'HOST DOWNTIME ALERT: test_host_0;STOPPED; ' 'Host has exited from a period of scheduled downtime'), ('info', 'HOST NOTIFICATION: test_contact;test_host_0;' 'DOWNTIMEEND (UP);0;notify-host;UP'), ('error', 'HOST NOTIFICATION: test_contact;test_host_0;DOWN;1;notify-host;DOWN'), ('info', 'HOST ALERT: test_host_0;UP;HARD;3;UP'), # ('info', 'HOST NOTIFICATION: test_contact;test_host_0;UP;notify-host;UP'), ('error', 'HOST ALERT: test_host_0;DOWN;SOFT;1;DOWN'), ('error', 'HOST ALERT: test_host_0;DOWN;SOFT;2;DOWN'), ('error', 'HOST ALERT: test_host_0;DOWN;HARD;3;DOWN'), ('error', 'HOST NOTIFICATION: test_contact;test_host_0;DOWN;1;notify-host;DOWN') ] for log_level, log_message in expected_logs: assert (log_level, log_message) in monitoring_events, "Not found: %s" % log_message def test_schedule_fixed_host_downtime_with_service(self): """ Schedule a 
downtime for an host - services changes are not notified """ # Get the host host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # Not any downtime yet ! assert host.downtimes == {} # Get service scheduled downtime depth assert host.scheduled_downtime_depth == 0 # No current notifications assert 0 == host.current_notification_number, 'All OK no notifications' # To make tests quicker we make notifications send very quickly host.notification_interval = 0.001 host.event_handler_enabled = False # Get the service svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults # Not any downtime yet ! assert svc.downtimes == {} # Get service scheduled downtime depth assert svc.scheduled_downtime_depth == 0 # No current notifications assert 0 == svc.current_notification_number, 'All OK no notifications' # To make tests quicker we make notifications send very quickly svc.notification_interval = 0.001 svc.event_handler_enabled = False # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # Make the host and service be OK self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # schedule a 15 minutes downtime now = int(time.time()) duration = 15 * 60 cmd = "[%lu] SCHEDULE_HOST_DOWNTIME;test_host_0;%d;%d;1;;%d;" \ "downtime author;downtime comment" % (now, now, now + duration, duration) self._scheduler.run_external_commands([cmd]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.external_command_loop(1) # A downtime exist for the host assert len(host.downtimes) == 1 downtime = list(host.downtimes.values())[0] assert downtime.comment == "downtime comment" assert downtime.author == "downtime author" assert downtime.start_time == now assert downtime.end_time == now + duration assert downtime.duration == duration # Fixed assert downtime.fixed # Already active assert downtime.is_in_effect # Cannot be deleted assert not downtime.can_be_deleted assert downtime.trigger_id == "" # Get host scheduled downtime depth scheduled_downtime_depth = host.scheduled_downtime_depth assert host.scheduled_downtime_depth == 1 assert 0 == host.current_notification_number, 'Should not have any notification' # Notification: downtime start self.assert_actions_count(1) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # A comment exist in our host assert 1 == len(host.comments) # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Make the host be DOWN/HARD self.scheduler_loop(3, [[host, 2, 'DOWN']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == host.state_type assert "DOWN" == host.state assert 0 == host.current_notification_number, 'Should not have any notification' # Now 2 actions because the host is a problem self.assert_actions_count(2) # The downtime started 
self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # The host is now a problem... self.assert_actions_match(1, 'VOID', 'command') self.assert_actions_match(1, 'PROBLEM', 'type') self.assert_actions_match(1, 'scheduled', 'status') # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Make the service be CRITICAL/HARD self.scheduler_loop(3, [[svc, 2, 'CRITICAL']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == host.state_type assert "DOWN" == host.state assert "HARD" == svc.state_type assert "CRITICAL" == svc.state # Still only 1 downtime assert 1 == len(host.downtimes) # No downtime for the service assert 0 == len(svc.downtimes) assert not svc.in_scheduled_downtime # The host is still in a scheduled downtime assert self._scheduler.find_item_by_id(svc.host).in_scheduled_downtime assert 0 == host.current_notification_number, 'Should not have any notification' assert 0 == svc.current_notification_number, 'Should not have any notification' # Now 3 actions because the host and its service are problems self.assert_actions_count(3) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # The host is always a problem... self.assert_actions_match(1, 'VOID', 'command') self.assert_actions_match(1, 'PROBLEM', 'type') self.assert_actions_match(1, 'scheduled', 'status') # The service is now a problem... 
self.assert_actions_match(2, 'VOID', 'command') self.assert_actions_match(2, 'PROBLEM', 'type') self.assert_actions_match(2, 'scheduled', 'status') # Wait for a while, the host and the service will go OK but after the downtime # expiry time # Time warp 15 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=15)) # Make the service be OK/HARD self.scheduler_loop(1, [[svc, 0, 'OK']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == host.state_type assert "DOWN" == host.state assert "HARD" == svc.state_type assert "OK" == svc.state # Time warp 1 minutes frozen_datetime.tick(delta=datetime.timedelta(minutes=1)) # Make the host be UP/HARD self.scheduler_loop(2, [[host, 0, 'UP']]) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == host.state_type assert "UP" == host.state assert "HARD" == svc.state_type assert "OK" == svc.state assert 0 == host.current_notification_number, 'Should not have any notification' assert 0 == svc.current_notification_number, 'Should not have any notification' # 2 actions: host downtime start and end. Former host problem and recovery are # master notifications that have been removed on downtime end # But service problem / recovery notifications are still active self.assert_actions_count(5) # The downtime started self.assert_actions_match(0, '/notifier.pl', 'command') self.assert_actions_match(0, 'DOWNTIMESTART', 'type') self.assert_actions_match(0, 'scheduled', 'status') # The downtime ended self.assert_actions_match(2, '/notifier.pl', 'command') self.assert_actions_match(2, 'DOWNTIMEEND', 'type') self.assert_actions_match(2, 'scheduled', 'status') # We got 'monitoring_log' broks for logging to the monitoring events.. # no_date to avoid comparing the events timestamp ! 
monitoring_events = self.get_monitoring_events(no_date=True) print("monitoring events: %s" % monitoring_events) expected_logs = [ ('info', 'EXTERNAL COMMAND: [%s] SCHEDULE_HOST_DOWNTIME;test_host_0;' '%s;%s;1;;%s;downtime author;downtime comment' % ( now, now, now + duration, duration)), ('info', 'HOST DOWNTIME ALERT: test_host_0;STARTED; ' 'Host has entered a period of scheduled downtime'), ('info', 'HOST NOTIFICATION: test_contact;test_host_0;DOWNTIMESTART (UP);0;notify-host;UP'), ('error', 'HOST ALERT: test_host_0;DOWN;SOFT;1;DOWN'), ('error', 'HOST ALERT: test_host_0;DOWN;SOFT;2;DOWN'), ('error', 'HOST ALERT: test_host_0;DOWN;HARD;3;DOWN'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;SOFT;1;CRITICAL'), ('error', 'SERVICE ALERT: test_host_0;test_ok_0;CRITICAL;HARD;2;CRITICAL'), ('info', 'SERVICE ALERT: test_host_0;test_ok_0;OK;HARD;2;OK'), ('info', 'HOST ALERT: test_host_0;UP;HARD;3;UP') ] for log_level, log_message in expected_logs: assert (log_level, log_message) in monitoring_events
class TestDowntime(AlignakTest): ''' This class tests the downtimes ''' def setUp(self): ''' For each test load and check the configuration :return: None ''' pass def test_create_downtime(self): ''' Create a downtime object ''' pass def test_schedule_fixed_svc_downtime(self): ''' Schedule a fixed downtime for a service ''' pass def test_schedule_flexible_svc_downtime(self): ''' Schedule a flexible downtime for a service ''' pass def test_schedule_fixed_host_downtime(self): ''' Schedule a fixed downtime for an host ''' pass def test_schedule_fixed_host_downtime_with_service(self): ''' Schedule a downtime for an host - services changes are not notified ''' pass
7
7
166
19
111
38
2
0.34
1
7
1
0
6
0
6
61
1,005
116
664
54
657
228
512
51
505
2
2
2
9
3,959
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_end_parsing_types.py
tests.test_end_parsing_types.TestEndParsingType
class TestEndParsingType(AlignakTest):
    """ This class tests properties types after config loaded and parsed """
    def setUp(self):
        super(TestEndParsingType, self).setUp()

    def check_object_property(self, obj, prop):
        """ Check the property of an object

        :param obj: object reference
        :type obj: object
        :param prop: property name
        :type prop: str
        :return: None
        """
        value = getattr(obj, prop, None)
        if value is None:
            # Unset properties are not checked
            return

        obj_expected_type = self.map_type(obj.properties[prop])
        # These properties are pure bytes string
        if prop in ['uuid', 'hash', 'push_flavor', 'instance_id', 'host_name']:
            obj_expected_type = bytes

        # These properties are containing the name or uuid of other items!
        # Sometimes it is the name and sometimes it is the uuid!!!!!
        # host_name may be a bytes string (socket name) or a string (host dependency) !
        # address6 may be an IPv6 address or a contact address field!
        # todo: change this and then modify the test!
        if prop in ['host_name', 'address6', 'instance_id', 'push_flavor', 'hash',
                    'imported_from']:
            return
        if prop in ['realm', 'check_period', 'check_command', 'event_handler',
                    'snapshot_period', 'maintenance_period', 'notification_period',
                    'service_notification_period', 'host_notification_period']:
            return

        assert isinstance(value, obj_expected_type), \
            "The %s property isn't a %s: %s, value=%s, for: %s" \
            % (prop, obj_expected_type, value.__class__, value, obj)

    @staticmethod
    def map_type(obj):
        """ Detect the expected Python type of a property descriptor

        :param obj: property descriptor instance (a ``Property`` sub-class)
        :type obj: object
        :return: expected instance type, or None for an unknown descriptor
        """
        def text_type():
            # Python 2 text type is `unicode`; Python 3 has no `unicode`
            # builtin, so fall back to `str`
            try:
                return unicode
            except NameError:
                return str

        if isinstance(obj, ListProp):
            return list
        # String-like descriptors all map to the text type
        if isinstance(obj, (StringProp, UnusedProp)):
            return text_type()
        if isinstance(obj, BoolProp):
            return bool
        if isinstance(obj, IntegerProp):
            return int
        if isinstance(obj, FloatProp):
            return float
        if isinstance(obj, CharProp):
            return str
        if isinstance(obj, DictProp):
            return dict
        if isinstance(obj, (AddrProp, ToGuessProp)):
            return text_type()
        # Unknown descriptor: implicitly None (same as the historic behaviour)
        return None

    def check_objects_from(self, container):
        """ Check properties of an alignak item

        :param container: object / alignak item
        :type container: object
        :return: None
        """
        assert isinstance(container, Items)
        for obj in container:
            for prop in obj.properties:
                self.check_object_property(obj, prop)

    def _check_item_properties(self, item, skipped=(), bytes_props=('uuid',)):
        """Check that every set property of *item* matches its declared type.

        Factors out the identical per-item loop formerly duplicated for
        Check, Notification, EventHandler, Timeperiod and Command.

        :param item: item instance to check (Check, Notification, ...)
        :type item: object
        :param skipped: property names excluded from the check
        :type skipped: tuple
        :param bytes_props: property names expected to be pure bytes strings
        :type bytes_props: tuple
        :return: None
        """
        for prop in item.properties:
            if not hasattr(item, prop):
                continue
            if prop in skipped:
                # TODO : clean this - some properties are deliberately ignored
                print("Skipping %s " % prop)
                continue
            value = getattr(item, prop)
            # We should get rid of None, maybe use the "neutral" value for the type
            if value is None:
                print("Skipping %s " % prop)
                continue
            print("TESTING %s with value %s" % (prop, value))
            obj_expected_type = self.map_type(item.properties[prop])
            if prop in bytes_props:
                # These properties are pure bytes string
                obj_expected_type = bytes
            assert isinstance(value, obj_expected_type), \
                "The %s attr/property of %s object isn't a %s: %s, value=%s" \
                % (prop, item.properties, obj_expected_type, value.__class__, value)

    def test_types(self):  # pylint: disable=R0912
        """ Test properties types

        :return: None
        """
        self.setup_with_file('cfg/cfg_default.cfg')

        for objects in (self._arbiter.conf.arbiters, self._arbiter.conf.contacts,
                        self._arbiter.conf.notificationways, self._arbiter.conf.hosts):
            self.check_objects_from(objects)

        print("== test Check() ==")
        check = Check({'status': u'OK', 'command': u'check_ping', 'ref': 0, 't_to_go': 10.0})
        self._check_item_properties(check, skipped=('ref',))

        print("== test Notification() ==")
        self._check_item_properties(Notification(), skipped=('already_start_escalations',))

        print("== test EventHandler() ==")
        # 'command' may be a bytes string and is excluded from the type check
        self._check_item_properties(EventHandler({}), skipped=('command',))

        print("== test Timeperiod() ==")
        self._check_item_properties(Timeperiod({}))

        print("== test Command() ==")
        self._check_item_properties(Command({}))
class TestEndParsingType(AlignakTest): ''' This class test properties types after config loaded and parsed ''' def setUp(self): pass def check_object_property(self, obj, prop): ''' Check the property of an object :param obj: object reference :type obj: object :param prop: property name :type prop: str :return: None ''' pass @staticmethod def map_type(obj): ''' Detect type of a property :param obj: get type of object :type obj: object :return: instance type ''' pass def check_objects_from(self, container): ''' Check properties of an alignak item :param container: object / alignak item :type container: object :return: None ''' pass def test_types(self): ''' Test properties types :return: None ''' pass
7
5
41
5
29
8
10
0.28
1
25
16
0
4
0
5
60
214
31
145
20
138
41
123
19
117
25
2
5
49
3,960
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_environment.py
tests.test_environment.TestEnvironment
class TestEnvironment(AlignakTest):
    """
    This class tests the environment (eg. main configuration) file
    """
    def setUp(self):
        super(TestEnvironment, self).setUp()

    def test_config_ko(self):
        """ Tests error in the configuration parser calling

        :return: None
        """
        # Configuration file name empty
        with pytest.raises(ValueError):
            args = {'<cfg_file>': None, '--verbose': True}
            self.alignak_env = AlignakConfigParser(args)
            self.alignak_env.parse()

        # Configuration file does not exist
        with pytest.raises(ValueError):
            # Get Alignak environment
            args = {'<cfg_file>': 'unexisting.file', '--verbose': True}
            self.alignak_env = AlignakConfigParser(args)
            self.alignak_env.parse()

    def test_config_ko_content(self):
        """ Configuration has some loading problems ...

        :return: None
        """
        # No defined sections
        configuration_file = os.path.join(self._test_dir,
                                          os.path.join('./cfg/environment',
                                                       'alignak_no_sections.ini'))
        # Configuration file does not exist
        # NOTE(review): comment kept from the original; the file exists but has
        # no sections, which is why parse() raises ValueError
        with pytest.raises(ValueError):
            # Get Alignak environment
            args = {'<cfg_file>': configuration_file, '--verbose': True}
            self.alignak_env = AlignakConfigParser(args)
            self.alignak_env.parse()

        # --------------------
        # Syntax error - parse() returns False instead of raising
        configuration_file = os.path.join(self._test_dir,
                                          os.path.join('./cfg/environment',
                                                       'alignak_section_syntax.ini'))
        args = {'<cfg_file>': configuration_file, '--verbose': True}
        self.alignak_env = AlignakConfigParser(args)
        assert not self.alignak_env.parse()

        # --------------------
        # Interpolation error - parse() also returns False
        configuration_file = os.path.join(self._test_dir,
                                          os.path.join('./cfg/environment',
                                                       'alignak_section_syntax2.ini'))
        # Get Alignak environment
        args = {'<cfg_file>': configuration_file, '--verbose': True}
        self.alignak_env = AlignakConfigParser(args)
        assert not self.alignak_env.parse()

    def test_config_ok(self):
        """ Default shipped configuration has no loading problems ...

        :return: None
        """
        # Get Alignak environment with the default shipped configuration file
        args = {
            '<cfg_file>': os.path.join(self._test_dir, '../etc/alignak.ini'),
            '--verbose': True
        }
        print("Args: %s" % args)
        self.alignak_env = AlignakConfigParser(args)
        assert self.alignak_env.parse()

    def test_config_several_files_ok(self):
        """ Default shipped configuration has no loading problems ...

        Parses a configuration split over several files and checks the
        resulting defaults, macros, daemons and modules dictionaries.

        :return: None
        """
        cwd = self._test_dir

        # Default shipped configuration file
        configuration_file = os.path.join(self._test_dir,
                                          os.path.join('./cfg/environment/several_files',
                                                       'alignak_ok.ini'))
        # Get Alignak environment
        args = {'<cfg_file>': configuration_file, '--verbose': True}
        self.alignak_env = AlignakConfigParser(args)
        assert self.alignak_env.parse()

        # Defaults keep the raw (uninterpolated) '%(_dist)s' reference
        default_section = OrderedDict([
            ('_dist', '/tmp'),
            ('_dist_etc', '%(_dist)s/etc/alignak'),
            ('config_name', 'Alignak global configuration'),
            ('extra_config_name', 'extra')
        ])
        assert self.alignak_env.get_defaults() == default_section

        # Variables prefixed with an _ will be considered as Alignak macros
        macros = OrderedDict([
            ('_dist', '/tmp'),
            ('_dist_etc', '/tmp/etc/alignak')
        ])
        assert self.alignak_env.get_alignak_macros() == macros

        # No Nagios-legacy cfg files in this configuration
        assert self.alignak_env.get_legacy_cfg_files() == {}

        arbiter_master = {
            '_dist': '/tmp',
            '_dist_etc': '/tmp/etc/alignak',
            'config_name': 'Alignak global configuration',
            'extra_config_name': 'extra',
            'imported_from': os.path.join(cwd,
                                          'cfg/environment/several_files/alignak_ok.ini'),
            'type': 'arbiter',
            'name': 'arbiter-master',
            'modules': 'web-services'
        }
        daemons = {
            'daemon.arbiter-master': arbiter_master,
            'daemon.arbiter-spare': {
                '_dist': '/tmp',
                '_dist_etc': '/tmp/etc/alignak',
                'config_name': 'Alignak global configuration',
                'extra_config_name': 'extra',
                'imported_from': os.path.join(cwd,
                                              'cfg/environment/several_files/alignak_ok.ini'),
                'type': 'arbiter',
                'name': 'arbiter-spare'
            },
            'daemon.poller-master': {
                '_dist': '/tmp',
                '_dist_etc': '/tmp/etc/alignak',
                'config_name': 'Alignak global configuration',
                'extra_config_name': 'extra',
                'imported_from': os.path.join(cwd,
                                              'cfg/environment/several_files/alignak_ok.ini'),
                'type': 'poller',
                'name': 'poller-master'
            }
        }
        # get_daemons: all daemons, unknown name -> {}, known name -> its dict
        assert self.alignak_env.get_daemons() == daemons
        assert self.alignak_env.get_daemons(daemon_name='unknown') == {}
        assert self.alignak_env.get_daemons(daemon_name='arbiter-master') == arbiter_master

        module_ws = {
            '_dist': '/tmp',
            '_dist_etc': '/tmp/etc/alignak',
            'config_name': 'Alignak global configuration',
            'extra_config_name': 'extra',
            'imported_from': os.path.join(cwd,
                                          'cfg/environment/several_files/alignak_ok.ini'),
            'python_name': 'alignak_module_ws',
            'type': 'web-services',
            'name': 'web-services',
            'extra_variable': 'extra'
        }
        modules = {
            'module.web-services': module_ws
        }
        # get_modules: all modules, by name, by daemon (names or full dicts)
        assert self.alignak_env.get_modules() == modules
        assert self.alignak_env.get_modules(name='unknown') == {}
        assert self.alignak_env.get_modules(daemon_name='arbiter-master') == ['web-services']
        assert self.alignak_env.get_modules(daemon_name='arbiter-master',
                                            names_only=False) == [module_ws]
        assert self.alignak_env.get_modules(name='web-services') == module_ws
class TestEnvironment(AlignakTest): ''' This class tests the environment (eg. main configuration) file ''' def setUp(self): pass def test_config_ko(self): ''' Tests error in the configuration parser calling :return: None ''' pass def test_config_ko_content(self): ''' Configuration has some loading problems ... :return: None ''' pass def test_config_ok(self): ''' Default shipped configuration has no loading problems ... :return: None ''' pass def test_config_several_files_ok(self): ''' Default shipped configuration has no loading problems ... :return: None ''' pass
6
5
30
3
22
5
1
0.27
1
4
1
0
5
1
5
60
159
19
110
20
104
30
54
20
48
1
2
1
5
3,961
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_escalations.py
tests.test_escalations.TestEscalations
class TestEscalations(AlignakTest): """ This class tests for escalations """ def setUp(self): super(TestEscalations, self).setUp() self.setup_with_file('./cfg/cfg_escalations.cfg', dispatching=True) assert self.conf_is_correct # No error messages assert len(self.configuration_errors) == 0 # No warning messages assert len(self.configuration_warnings) == 0 def test_wildcard_in_service_description(self): """ Test wildcards in service description """ self_generated = [e for e in self._scheduler.pushed_conf.escalations if e.escalation_name.startswith('Generated-SE-')] host_services = self._scheduler.services.find_srvs_by_hostname("test_host_0_esc") # Todo: confirm this assertion # We only found one, but there are 3 services for this host ... perharps normal? assert 1 == len(self_generated) assert 3 == len(host_services) # We must find at least one self generated escalation in our host services for svc in host_services: print(("Service: %s" % self._scheduler.services[svc])) assert self_generated[0].uuid in self._scheduler.services[svc].escalations def test_simple_escalation(self): """ Test a simple escalation (NAGIOS legacy) """ del self._main_broker.broks[:] # Check freshness on each scheduler tick self._scheduler.update_recurrent_works_tick({'tick_manage_internal_checks': 10}) # Get host and services host = self._scheduler.hosts.find_by_name("test_host_0_esc") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0_esc", "test_svc_esc") svc.checks_in_progress = [] svc.act_depend_of = [] # ignore the host svc.event_handler_enabled = False # The service has 3 defined escalations: assert 3 == len(svc.escalations) # Service escalation levels # Generated service escalation has a name based upon SE uuid ... 
too hard to get it simply:) # self_generated = self._scheduler.escalations.find_by_name('Generated-ServiceEscalation-%s-%s') # self.assertIsNotNone(self_generated) # self.assertIs(self_generated, Serviceescalation) # self.assertIn(self_generated.uuid, svc.escalations) tolevel2 = self._scheduler.escalations.find_by_name('ToLevel2') assert tolevel2 is not None self.assertIsInstance(tolevel2, Escalation) assert tolevel2.uuid in svc.escalations tolevel3 = self._scheduler.escalations.find_by_name('ToLevel3') assert tolevel3 is not None self.assertIsInstance(tolevel3, Escalation) assert tolevel3.uuid in svc.escalations # 1 notification per minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime # -------------------------------------------------------------- # initialize host/service state # -------------------------------------------------------------- self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) assert "HARD" == host.state_type assert "UP" == host.state assert 0 == host.current_notification_number assert "HARD" == svc.state_type assert "OK" == svc.state assert 0 == svc.current_notification_number # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service goes to CRITICAL/SOFT self.scheduler_loop(1, [[svc, 2, 'BAD']]) assert "SOFT" == svc.state_type assert "CRITICAL" == svc.state # No notification... assert 0 == svc.current_notification_number # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # --- # 1/ # --- # Service goes to CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "CRITICAL" == svc.state # Service notification number must be 1 assert 1 == svc.current_notification_number cnn = svc.current_notification_number # We did not yet got an escalated notification assert 0 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) # We should have had 2 ALERT and a NOTIFICATION to the service defined contact # We also have a notification to level1 contact which is a contact defined for the host expected_logs = [ ('info', 'ACTIVE HOST CHECK: test_host_0_esc;UP;0;UP'), ('info', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;OK;0;OK'), ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;1;BAD'), ('error', 'SERVICE ALERT: test_host_0_esc;test_svc_esc;CRITICAL;SOFT;1;BAD'), ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;1;BAD'), ('error', 'SERVICE ALERT: test_host_0_esc;test_svc_esc;CRITICAL;HARD;2;BAD'), ('error', 'SERVICE NOTIFICATION: test_contact;test_host_0_esc;test_svc_esc;CRITICAL;1;notify-service;BAD'), ('error', 'SERVICE NOTIFICATION: level1;test_host_0_esc;test_svc_esc;CRITICAL;1;notify-service;BAD'), ] self.check_monitoring_events_log(expected_logs, dump=True) # --- # 2/ # --- # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service is now CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 2 == svc.current_notification_number # We got an escalated notification assert 1 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) # Now also notified to the level2 expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level2;test_host_0_esc;test_svc_esc;CRITICAL;2;notify-service;BAD') ] self.check_monitoring_events_log(expected_logs) # --- # 3/ # --- # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 3 == svc.current_notification_number # We got one more escalated notification assert 2 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level2;test_host_0_esc;test_svc_esc;' 'CRITICAL;3;notify-service;BAD') ] self.check_monitoring_events_log(expected_logs) # --- # 4/ # --- # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 4 == svc.current_notification_number # We got one more escalated notification assert 3 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level2;test_host_0_esc;test_svc_esc;' 'CRITICAL;4;notify-service;BAD') ] self.check_monitoring_events_log(expected_logs) # --- # 5/ # --- # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 5 == svc.current_notification_number # We got one more escalated notification assert 4 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level2;test_host_0_esc;test_svc_esc;' 'CRITICAL;4;notify-service;BAD'), ] self.check_monitoring_events_log(expected_logs) # --- # 6/ # --- # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 6 == svc.current_notification_number # We got one more escalated notification but we notified level 3 ! 
assert 5 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc;' 'CRITICAL;5;notify-service;BAD') ] self.check_monitoring_events_log(expected_logs) # --- # 7/ # --- # Now we send 10 more alerts and we are still always notifying only level3 for i in range(10): # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service is still CRITICAL/HARD # time.sleep(.2) self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 7 + i == svc.current_notification_number # We got one more escalated notification assert 6 + i == \ len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc;' 'CRITICAL;%d;notify-service;BAD' % (7 + i)) ] self.check_monitoring_events_log(expected_logs) # --- # 8/ # --- # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # The service recovers, all the notified contact will be contacted self.scheduler_loop(2, [[svc, 0, 'OK']]) # The notifications are created to be launched in the next second when they happen ! 
# Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) expected_logs += [ ('info', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;OK;2;OK'), ('info', 'SERVICE ALERT: test_host_0_esc;test_svc_esc;OK;HARD;2;OK'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0_esc;test_svc_esc;' 'OK;0;notify-service;OK'), ('info', 'SERVICE NOTIFICATION: level2;test_host_0_esc;test_svc_esc;' 'OK;0;notify-service;OK'), ('info', 'SERVICE NOTIFICATION: level1;test_host_0_esc;test_svc_esc;' 'OK;0;notify-service;OK'), ('info', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc;' 'OK;0;notify-service;OK'), ('info', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc;OK;1;OK') ] self.check_monitoring_events_log(expected_logs) def test_time_based_escalation(self): """ Time based escalations """ del self._main_broker.broks[:] self._scheduler.pushed_conf.tick_manage_internal_checks = 7200 self._scheduler.update_recurrent_works_tick({'tick_manage_internal_checks': 7200}) # Get host and services host = self._scheduler.hosts.find_by_name("test_host_0_esc") host.checks_in_progress = [] host.act_depend_of = [] # ignore the default router host.passive_checks_enabled = False print("Host check: %s / %s / %s / %s" % (host.active_checks_enabled, host.passive_checks_enabled, host.check_freshness, host.freshness_threshold)) host.check_interval = 7200 host.retry_interval = 7200 print("Host check: %s / %s / %s" % (host.check_period, host.check_interval, host.retry_interval)) print("Host check command: %s" % (host.check_command)) host.notification_interval = 1200 print("Host notifications: %s / %s / %s" % (host.notification_interval, host.notification_period, host.notification_options)) svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0_esc", "test_svc_esc_time") svc.checks_in_progress = [] svc.act_depend_of = [] # ignore the host svc.event_handler_enabled = False # The service has 3 defined escalations: assert 3 == 
len(svc.escalations) # Service escalation levels # Generated service escalation has a name based upon SE uuid ... too hard to get it simply:) # self_generated = self._scheduler.escalations.find_by_name('Generated-ServiceEscalation-%s-%s') # self.assertIsNotNone(self_generated) # self.assertIs(self_generated, Serviceescalation) # self.assertIn(self_generated.uuid, svc.escalations) tolevel2 = self._scheduler.escalations.find_by_name('ToLevel2-time') assert tolevel2 is not None print("Esc: %s / %s" % (type(tolevel2), tolevel2)) self.assertIsInstance(tolevel2, Escalation) assert tolevel2.uuid in svc.escalations tolevel3 = self._scheduler.escalations.find_by_name('ToLevel3-time') assert tolevel3 is not None self.assertIsInstance(tolevel3, Escalation) assert tolevel3.uuid in svc.escalations # 1 notification per minute svc.notification_interval = 1 # Freeze the time ! initial_datetime = datetime.datetime(year=2018, month=6, day=1, hour=18, minute=30, second=0) with freeze_time(initial_datetime) as frozen_datetime: assert frozen_datetime() == initial_datetime #-------------------------------------------------------------- # initialize host/service state #-------------------------------------------------------------- self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) assert "HARD" == host.state_type assert "UP" == host.state assert 0 == host.current_notification_number assert "HARD" == svc.state_type assert "OK" == svc.state assert 0 == svc.current_notification_number # We should have had 2 ALERT and a NOTIFICATION to the service defined contact # We also have a notification to level1 contact which is a contact defined for the host expected_logs = [ ('info', 'ACTIVE HOST CHECK: test_host_0_esc;UP;0;UP'), ('info', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;OK;0;OK'), ] self.check_monitoring_events_log(expected_logs) # Time warp frozen_datetime.tick(delta=datetime.timedelta(minutes=1, seconds=1)) # Service goes to CRITICAL/SOFT self.scheduler_loop(1, 
[[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "SOFT" == svc.state_type assert "CRITICAL" == svc.state # No notification... assert 0 == svc.current_notification_number # We should have had 2 ALERT and a NOTIFICATION to the service defined contact # We also have a notification to level1 contact which is a contact defined for the host expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;CRITICAL;1;BAD'), ('error', 'SERVICE ALERT: test_host_0_esc;test_svc_esc_time;CRITICAL;SOFT;1;BAD') ] self.check_monitoring_events_log(expected_logs) # --- # 1/ # --- # Service goes to CRITICAL/HARD time.sleep(1) self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) assert "HARD" == svc.state_type assert "CRITICAL" == svc.state # Service notification number must be 1 assert 1 == svc.current_notification_number cnn = svc.current_notification_number # We did not yet got an escalated notification assert 0 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) # We should have had 2 ALERT and a NOTIFICATION to the service defined contact # We also have a notification to level1 contact which is a contact defined for the host expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;CRITICAL;1;BAD'), ('error', 'SERVICE ALERT: test_host_0_esc;test_svc_esc_time;CRITICAL;SOFT;1;BAD'), ('error', 'SERVICE NOTIFICATION: level1;test_host_0_esc;test_svc_esc_time;CRITICAL;1;notify-service;BAD'), ('error', 'SERVICE NOTIFICATION: test_contact;test_host_0_esc;test_svc_esc_time;CRITICAL;1;notify-service;BAD'), ] self.check_monitoring_events_log(expected_logs) # --- # time warp 
... 5 minutes later ! # --- frozen_datetime.tick(delta=datetime.timedelta(minutes=5, seconds=1)) # --- # 2/ # --- # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # The notifications are created to be launched in the next second when they happen ! # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 2 == svc.current_notification_number # We got 1 escalated notification assert 1 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) # Now also notified to the level2 expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;CRITICAL;2;BAD'), # ('info', # 'ACTIVE HOST CHECK: test_host_0_esc;UP;HARD;1;Host assumed to be UP'), ('error', 'SERVICE NOTIFICATION: level2;test_host_0_esc;test_svc_esc_time;CRITICAL;2;notify-service;BAD'), ] self.check_monitoring_events_log(expected_logs) # --- # time warp ... 5 minutes later ! # --- frozen_datetime.tick(delta=datetime.timedelta(minutes=5, seconds=1)) # --- # 3/ # --- # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 3 == svc.current_notification_number # We got 1 more escalated notification assert 2 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc_time;CRITICAL;3;notify-service;BAD'), ] self.check_monitoring_events_log(expected_logs) # --- # time warp ... 5 minutes later ! 
# --- frozen_datetime.tick(delta=datetime.timedelta(minutes=5, seconds=1)) # --- # 4/ # --- # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 4 == svc.current_notification_number # We got one more escalated notification assert 3 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc_time;' 'CRITICAL;3;notify-service;BAD'), # ('info', # 'ACTIVE HOST CHECK: test_host_0_esc;UP;HARD;1;Host assumed to be UP'), ] self.check_monitoring_events_log(expected_logs) # --- # 5/ # --- # Now we send 10 more alerts and we are still always notifying only level3 for i in range(10): # --- # time warp ... 5 minutes later ! # --- frozen_datetime.tick(delta=datetime.timedelta(minutes=5, seconds=1)) # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 5 + i == svc.current_notification_number # We got one more escalated notification assert 4 + i == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc_time;' 'CRITICAL;%d;notify-service;BAD' % (5 + i)), ] self.check_monitoring_events_log(expected_logs) # --- # 6/ 1 hour later! # --- # --- # time warp ... 5 minutes later ! 
# --- frozen_datetime.tick(delta=datetime.timedelta(minutes=60)) # Service is still CRITICAL/HARD self.scheduler_loop(1, [[svc, 2, 'BAD']]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) # Service notification number increased assert 15 == svc.current_notification_number # We got one more escalated notification assert 15 == len([n.escalated for n in list(self._scheduler.actions.values()) if n.escalated]) expected_logs += [ ('error', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;CRITICAL;2;BAD'), ('error', 'SERVICE NOTIFICATION: all_services_1_hour;test_host_0_esc;test_svc_esc_time;' 'CRITICAL;15;notify-service;BAD'), ('error', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc_time;' 'CRITICAL;15;notify-service;BAD'), ] self.check_monitoring_events_log(expected_logs) # --- # 7/ # --- # --- # time warp ... 5 minutes later ! # --- frozen_datetime.tick(delta=datetime.timedelta(minutes=5, seconds=1)) # The service recovers, all the notified contact will be contacted self.scheduler_loop(1, [[svc, 0, 'OK']]) # Time warp 1 second frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.scheduler_loop(1) expected_logs += [ ('info', 'ACTIVE SERVICE CHECK: test_host_0_esc;test_svc_esc_time;OK;2;OK'), ('info', 'SERVICE ALERT: test_host_0_esc;test_svc_esc_time;OK;HARD;2;OK'), ('info', 'SERVICE NOTIFICATION: all_services_1_hour;test_host_0_esc;test_svc_esc_time;' 'OK;0;notify-service;OK'), ('info', 'SERVICE NOTIFICATION: test_contact;test_host_0_esc;test_svc_esc_time;' 'OK;0;notify-service;OK'), ('info', 'SERVICE NOTIFICATION: level3;test_host_0_esc;test_svc_esc_time;' 'OK;0;notify-service;OK'), ('info', 'SERVICE NOTIFICATION: level2;test_host_0_esc;test_svc_esc_time;' 'OK;0;notify-service;OK'), ('info', 'SERVICE NOTIFICATION: level1;test_host_0_esc;test_svc_esc_time;' 'OK;0;notify-service;OK') ] self.check_monitoring_events_log(expected_logs)
class TestEscalations(AlignakTest): ''' This class tests for escalations ''' def setUp(self): pass def test_wildcard_in_service_description(self): ''' Test wildcards in service description ''' pass def test_simple_escalation(self): ''' Test a simple escalation (NAGIOS legacy) ''' pass def test_time_based_escalation(self): ''' Time based escalations ''' pass
5
4
159
20
90
50
2
0.57
1
7
1
0
4
0
4
59
643
82
361
26
356
204
225
24
220
2
2
2
7
3,962
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_eventhandler.py
tests.test_eventhandler.TestEventhandler
class TestEventhandler(AlignakTest): """ This class test the eventhandler """ def setUp(self): super(TestEventhandler, self).setUp() def test_global_unknown_event_handler(self): """ Test global event handler unknown command :return: None """ with pytest.raises(SystemExit): self.setup_with_file('cfg/cfg_global_event_handlers_not_found.cfg') assert self.conf_is_correct is False self.show_configuration_logs() def test_global_event_handler(self): """ Test global event handler scenario 1: * check OK OK HARD * check CRITICAL x4 CRITICAL SOFT x1 then CRITICAL HARD * check OK x2 OK HARD :return: None """ self.setup_with_file('cfg/cfg_global_event_handlers.cfg', dispatching=True) self.clear_events() host = self._scheduler.hosts.find_by_name("test_host_1") print(host.event_handler_enabled) assert host.event_handler_enabled is True print("host: %s" % host.event_handler) print("global: %s" % host.__class__.global_event_handler) host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_1", "test_ok_0") assert svc.event_handler_enabled is True print("svc: %s" % svc.event_handler) print("global: %s" % svc.__class__.global_event_handler) svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.enable_notifications = False svc.notification_interval = 0 self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(0) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(1) self.assert_actions_match(0, 'test_global_service_eventhandler.pl CRITICAL SOFT', 'command') self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(2) self.assert_actions_match(0, 'test_global_service_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_global_service_eventhandler.pl CRITICAL HARD', 'command') self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) 
time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(3) self.assert_actions_match(0, 'test_global_service_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_global_service_eventhandler.pl CRITICAL HARD', 'command') self.assert_actions_match(2, 'test_global_service_eventhandler.pl OK HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) # Do not change self.assert_actions_count(3) self.scheduler_loop(1, [[host, 2, 'DOWN']]) time.sleep(0.1) self.show_actions() self.assert_actions_count(4) self.assert_actions_match(0, 'test_global_service_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_global_service_eventhandler.pl CRITICAL HARD', 'command') self.assert_actions_match(2, 'test_global_service_eventhandler.pl OK HARD', 'command') self.assert_actions_match(3, 'test_global_host_eventhandler.pl', 'command') self.scheduler_loop(1, [[host, 0, 'UP']]) time.sleep(0.1) self.show_actions() self.assert_actions_count(5) self.assert_actions_match(0, 'test_global_service_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_global_service_eventhandler.pl CRITICAL HARD', 'command') self.assert_actions_match(2, 'test_global_service_eventhandler.pl OK HARD', 'command') self.assert_actions_match(3, 'test_global_host_eventhandler.pl DOWN SOFT', 'command') self.assert_actions_match(4, 'test_global_host_eventhandler.pl UP SOFT', 'command') expected_logs = [ ('info', 'ACTIVE HOST CHECK: test_host_1;UP;0;UP'), ('info', 'ACTIVE SERVICE CHECK: test_host_1;test_ok_0;OK;0;OK'), ('error', 'ACTIVE SERVICE CHECK: test_host_1;test_ok_0;CRITICAL;1;CRITICAL'), ('error', 'SERVICE ALERT: test_host_1;test_ok_0;CRITICAL;SOFT;1;CRITICAL'), ('error', 'SERVICE EVENT HANDLER: test_host_1;test_ok_0;CRITICAL;SOFT;1;global_service_eventhandler'), 
('error', 'ACTIVE SERVICE CHECK: test_host_1;test_ok_0;CRITICAL;1;CRITICAL'), ('error', 'SERVICE ALERT: test_host_1;test_ok_0;CRITICAL;HARD;2;CRITICAL'), ('error', 'SERVICE EVENT HANDLER: test_host_1;test_ok_0;CRITICAL;HARD;2;global_service_eventhandler'), ('error', 'ACTIVE SERVICE CHECK: test_host_1;test_ok_0;CRITICAL;2;CRITICAL'), ('error', 'ACTIVE SERVICE CHECK: test_host_1;test_ok_0;CRITICAL;2;CRITICAL'), ('info', 'ACTIVE SERVICE CHECK: test_host_1;test_ok_0;OK;2;OK'), ('info', 'SERVICE ALERT: test_host_1;test_ok_0;OK;HARD;2;OK'), ('info', 'SERVICE EVENT HANDLER: test_host_1;test_ok_0;OK;HARD;2;global_service_eventhandler'), ('info', 'ACTIVE SERVICE CHECK: test_host_1;test_ok_0;OK;1;OK'), ('error', 'ACTIVE HOST CHECK: test_host_1;DOWN;1;DOWN'), ('error', 'HOST ALERT: test_host_1;DOWN;SOFT;1;DOWN'), ('error', 'HOST EVENT HANDLER: test_host_1;DOWN;SOFT;1;global_host_eventhandler'), ('info', 'ACTIVE HOST CHECK: test_host_1;UP;1;UP'), ('info', 'HOST ALERT: test_host_1;UP;SOFT;2;UP'), ('info', 'HOST EVENT HANDLER: test_host_1;UP;SOFT;2;global_host_eventhandler'), ] self.check_monitoring_events_log(expected_logs, dump=True) def test_ok_critical_ok(self): """ Test event handler scenario 1: * check OK OK HARD * check CRITICAL x4 CRITICAL SOFT x1 then CRITICAL HARD * check OK x2 OK HARD :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.enable_notifications = False svc.notification_interval = 0 self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(0) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(1) self.assert_actions_match(0, 
'test_eventhandler.pl CRITICAL SOFT', 'command') self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(2) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl CRITICAL HARD', 'command') self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(3) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl CRITICAL HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl OK HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(3) def test_ok_warning_ok(self): """ Test event handler scenario 2: * check OK OK HARD * check WARNING x4 WARNING SOFT x1 then WARNING HARD * check OK x2 OK HARD :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.enable_notifications = False svc.notification_interval = 0 self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(0) self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) self.assert_actions_count(1) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) self.assert_actions_count(2) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 
'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(3) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl OK HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(3) def test_ok_warning_critical_ok(self): """ Test event handler scenario 3: * check OK OK HARD * check WARNING x4 WARNING SOFT x1 then WARNING HARD * check CRITICAL x4 CRITICAL HARD * check OK x2 OK HARD :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.enable_notifications = False svc.notification_interval = 0 self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(0) self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) assert "SOFT" == svc.state_type self.assert_actions_count(1) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(2) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) assert "HARD" == 
svc.state_type self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(3) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl CRITICAL HARD', 'command') self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(3) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(3) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(3) self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(4) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl CRITICAL HARD', 'command') self.assert_actions_match(3, 'test_eventhandler.pl OK HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(4) def test_ok_warning_s_critical_h_ok(self): """ Test event handler scenario 4: * check OK OK HARD * check WARNING WARNING SOFT * check CRITICAL x2 CRITICAL HARD * check OK x2 OK HARD :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.enable_notifications = False svc.notification_interval = 0 self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(0) self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) assert "SOFT" 
== svc.state_type self.assert_actions_count(1) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(2) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl CRITICAL HARD', 'command') self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(3) self.assert_actions_match(0, 'test_eventhandler.pl WARNING SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl CRITICAL HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl OK HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(3) def test_ok_critical_s_warning_h_ok(self): """ Test event handler scenario 5: * check OK OK HARD * check CRITICAL CRITICAL SOFT * check WARNING x2 WARNING HARD * check OK x2 OK HARD :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.enable_notifications = False svc.notification_interval = 0 self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(0) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) assert "SOFT" == svc.state_type self.assert_actions_count(1) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(2) 
self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(3) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl OK HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(3) def test_ok_critical_s_warning_h_warning_h_ok(self): """ Test event handler scenario 6: * check OK OK HARD * check CRITICAL CRITICAL SOFT * check WARNING x2 WARNING HARD * check CRITICAL CRITICAL HARD * check OK x2 OK HARD :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.enable_notifications = False svc.notification_interval = 0 self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(0) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) assert "SOFT" == svc.state_type self.assert_actions_count(1) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(2) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.scheduler_loop(1, [[svc, 1, 'WARNING']]) time.sleep(0.1) 
self.assert_actions_count(2) self.scheduler_loop(1, [[svc, 2, 'CRITICAL']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(3) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl CRITICAL HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) assert "HARD" == svc.state_type self.assert_actions_count(4) self.assert_actions_match(0, 'test_eventhandler.pl CRITICAL SOFT', 'command') self.assert_actions_match(1, 'test_eventhandler.pl WARNING HARD', 'command') self.assert_actions_match(2, 'test_eventhandler.pl CRITICAL HARD', 'command') self.assert_actions_match(3, 'test_eventhandler.pl OK HARD', 'command') self.scheduler_loop(1, [[svc, 0, 'OK']]) time.sleep(0.1) self.assert_actions_count(4)
class TestEventhandler(AlignakTest): ''' This class test the eventhandler ''' def setUp(self): pass def test_global_unknown_event_handler(self): ''' Test global event handler unknown command :return: None ''' pass def test_global_event_handler(self): ''' Test global event handler scenario 1: * check OK OK HARD * check CRITICAL x4 CRITICAL SOFT x1 then CRITICAL HARD * check OK x2 OK HARD :return: None ''' pass def test_ok_critical_ok(self): ''' Test event handler scenario 1: * check OK OK HARD * check CRITICAL x4 CRITICAL SOFT x1 then CRITICAL HARD * check OK x2 OK HARD :return: None ''' pass def test_ok_warning_ok(self): ''' Test event handler scenario 2: * check OK OK HARD * check WARNING x4 WARNING SOFT x1 then WARNING HARD * check OK x2 OK HARD :return: None ''' pass def test_ok_warning_critical_ok(self): ''' Test event handler scenario 3: * check OK OK HARD * check WARNING x4 WARNING SOFT x1 then WARNING HARD * check CRITICAL x4 CRITICAL HARD * check OK x2 OK HARD :return: None ''' pass def test_ok_warning_s_critical_h_ok(self): ''' Test event handler scenario 4: * check OK OK HARD * check WARNING WARNING SOFT * check CRITICAL x2 CRITICAL HARD * check OK x2 OK HARD :return: None ''' pass def test_ok_critical_s_warning_h_ok(self): ''' Test event handler scenario 5: * check OK OK HARD * check CRITICAL CRITICAL SOFT * check WARNING x2 WARNING HARD * check OK x2 OK HARD :return: None ''' pass def test_ok_critical_s_warning_h_warning_h_ok(self): ''' Test event handler scenario 6: * check OK OK HARD * check CRITICAL CRITICAL SOFT * check WARNING x2 WARNING HARD * check CRITICAL CRITICAL HARD * check OK x2 OK HARD :return: None ''' pass
10
9
53
8
39
7
1
0.19
1
2
0
0
9
0
9
64
491
84
353
25
343
68
323
25
313
1
2
1
9
3,963
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_exclude_services.py
tests.test_exclude_services.TestExcludeServices
class TestExcludeServices(AlignakTest): """ This class test service exclude / service include feature """ def setUp(self): super(TestExcludeServices, self).setUp() self.setup_with_file('cfg/cfg_exclude_include_services.cfg', dispatching=True) def test_exclude_services(self): """ Test service_excludes statement in host """ hst1 = self._scheduler.hosts.find_by_name("test_host_01") hst2 = self._scheduler.hosts.find_by_name("test_host_02") assert [] == hst1.service_excludes assert ["srv-svc11", "srv-svc21", "proc proc1"] == hst2.service_excludes Find = self._scheduler.services.find_srv_by_name_and_hostname # All services should exist for test_host_01 find = partial(Find, 'test_host_01') for svc in ( 'srv-svc11', 'srv-svc12', 'srv-svc21', 'srv-svc22', 'proc proc1', 'proc proc2', ): assert find(svc) is not None # Half the services only should exist for test_host_02 find = partial(Find, 'test_host_02') for svc in ('srv-svc12', 'srv-svc22', 'proc proc2', ): assert find(svc) is not None, "%s not found" % svc for svc in ('srv-svc11', 'srv-svc21', 'proc proc1', ): assert find(svc) is None, "%s found" % svc def test_service_includes(self): """ Test service_includes statement in host """ find = self._scheduler.services.find_srv_by_name_and_hostname find = partial(find, 'test_host_03') print("Found: %s / %s" % (find, find.__dict__)) for svc in ('srv-svc11', 'proc proc2', 'srv-svc22'): assert find(svc) is not None, "%s not found" % svc for svc in ('srv-svc12', 'srv-svc21', 'proc proc1'): assert find(svc) is None, "%s found" % svc
class TestExcludeServices(AlignakTest): ''' This class test service exclude / service include feature ''' def setUp(self): pass def test_exclude_services(self): ''' Test service_excludes statement in host ''' pass def test_service_includes(self): ''' Test service_includes statement in host ''' pass
4
3
15
3
10
3
3
0.37
1
2
0
0
3
0
3
58
52
11
30
11
26
11
26
11
22
4
2
1
8
3,964
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestPathProp
class TestPathProp(TestStringProp): """Test the PathProp class""" prop_class = alignak.property.PathProp
class TestPathProp(TestStringProp): '''Test the PathProp class''' pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
60
4
1
2
2
1
1
2
2
1
0
3
0
0
3,965
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestListProp
class TestListProp(PropertyTests, AlignakTest): """Test the ListProp class""" prop_class = alignak.property.ListProp def test_pythonize(self): p = self.prop_class() assert p.pythonize("") == [] assert p.pythonize("1,2,3") == ["1", "2", "3"] # Default is to split on coma for list also. assert p.pythonize(["1,2,3", "4,5,6"]) == ["1","2","3", "4","5","6"] def test_pythonize_nosplit(self): p = self.prop_class(split_on_comma=False) assert p.pythonize("") == [] assert p.pythonize("1,2,3") == ["1,2,3"] # Default is to split on coma for list also. assert p.pythonize(["1,2,3", "4,5,6"]) == ["1,2,3", "4,5,6"]
class TestListProp(PropertyTests, AlignakTest): '''Test the ListProp class''' def test_pythonize(self): pass def test_pythonize_nosplit(self): pass
3
1
6
0
5
1
1
0.25
2
0
0
0
2
0
2
61
18
3
12
6
9
3
12
6
9
1
2
0
2
3,966
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestIntegerProp
class TestIntegerProp(PropertyTests, AlignakTest): """Test the IntegerProp class""" prop_class = alignak.property.IntegerProp def test_pythonize(self): p = self.prop_class() assert p.pythonize("1") == 1 assert p.pythonize("0") == 0 assert p.pythonize("1000.33") == 1000 assert p.pythonize(["2000.66", "1000.33"]) == 1000
class TestIntegerProp(PropertyTests, AlignakTest): '''Test the IntegerProp class''' def test_pythonize(self): pass
2
1
6
0
6
0
1
0.13
2
0
0
0
1
0
1
60
11
2
8
4
6
1
8
4
6
1
2
0
1
3,967
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestFloatProp
class TestFloatProp(PropertyTests, AlignakTest): """Test the FloatProp class""" prop_class = alignak.property.FloatProp def test_pythonize(self): p = self.prop_class() assert p.pythonize("1") == 1.0 assert p.pythonize("0") == 0.0 assert p.pythonize("1000.33") == 1000.33 assert p.pythonize(["2000.66", "1000.33"]) == 1000.33
class TestFloatProp(PropertyTests, AlignakTest): '''Test the FloatProp class''' def test_pythonize(self): pass
2
1
6
0
6
0
1
0.13
2
0
0
0
1
0
1
60
11
2
8
4
6
1
8
4
6
1
2
0
1
3,968
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestConfigPathProp
class TestConfigPathProp(TestStringProp): """Test the ConfigPathProp class""" prop_class = alignak.property.ConfigPathProp
class TestConfigPathProp(TestStringProp): '''Test the ConfigPathProp class''' pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
60
4
1
2
2
1
1
2
2
1
0
3
0
0
3,969
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestCharProp
class TestCharProp(PropertyTests, AlignakTest): """Test the CharProp class""" prop_class = alignak.property.CharProp def test_pythonize(self): p = self.prop_class() assert p.pythonize("c") == "c" assert p.pythonize("cxxxx") == "c" assert p.pythonize(["bxxxx", "cxxxx"]) == "c"
class TestCharProp(PropertyTests, AlignakTest): '''Test the CharProp class''' def test_pythonize(self): pass
2
1
5
0
5
0
1
0.14
2
0
0
0
1
0
1
60
10
2
7
4
5
1
7
4
5
1
2
0
1
3,970
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestBoolProp
class TestBoolProp(PropertyTests, AlignakTest): """Test the BoolProp class""" prop_class = alignak.property.BoolProp def test_pythonize(self): p = self.prop_class() # allowed strings for `True` assert p.pythonize("1") == True assert p.pythonize("yes") == True assert p.pythonize("true") == True assert p.pythonize("on") == True assert p.pythonize(["off", "on"]) == True # allowed strings for `False` assert p.pythonize("0") == False assert p.pythonize("no") == False assert p.pythonize("false") == False assert p.pythonize("off") == False assert p.pythonize(["on", "off"]) == False
class TestBoolProp(PropertyTests, AlignakTest): '''Test the BoolProp class''' def test_pythonize(self): pass
2
1
14
0
12
2
1
0.21
2
0
0
0
1
0
1
60
19
2
14
4
12
3
14
4
12
1
2
0
1
3,971
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties.py
tests.test_aa_properties.TestAddrProp
class TestAddrProp(PropertyTests, AlignakTest): """Test the AddrProp class""" prop_class = alignak.property.AddrProp def test_pythonize_with_IPv4_addr(self): p = self.prop_class() assert p.pythonize("192.168.10.11:445") == \ {'address': "192.168.10.11", 'port': 445} # no colon, no port assert p.pythonize("192.168.10.11") == \ {'address': "192.168.10.11"} # colon but no port number with pytest.raises(ValueError): p.pythonize("192.168.10.11:") # only colon, no addr, no port number with pytest.raises(ValueError): p.pythonize(":") # no address, only port number assert p.pythonize(":445") == \ {'address': "", 'port': 445} def test_pythonize_with_hostname(self): p = self.prop_class() assert p.pythonize("host_123:445") == \ {'address': "host_123", 'port': 445} # no colon, no port assert p.pythonize("host_123") == \ {'address': "host_123"} # colon but no port number with pytest.raises(ValueError): p.pythonize("host_123:") # only colon, no addr, no port number with pytest.raises(ValueError): p.pythonize(":") # no address, only port number assert p.pythonize(":445") == \ {'address': "", 'port': 445} assert p.pythonize([":444", ":445"]) == \ {'address': "", 'port': 445}
class TestAddrProp(PropertyTests, AlignakTest): '''Test the AddrProp class''' def test_pythonize_with_IPv4_addr(self): pass def test_pythonize_with_hostname(self): pass
3
1
20
0
16
4
1
0.27
2
1
0
0
2
0
2
61
45
3
33
6
30
9
21
6
18
1
2
1
2
3,972
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_dateranges.py
tests.test_dateranges.TestDateRanges
class TestDateRanges(AlignakTest): """ This class test dataranges """ def setUp(self): super(TestDateRanges, self).setUp() def test_get_start_of_day(self): """ Test function get_start_of_day and return the timestamp of begin of day :return: None """ now = time.localtime() start = time.mktime((2015, 7, 26, 0, 0, 0, 0, 0, now.tm_isdst)) timestamp = alignak.daterange.get_start_of_day(2015, 7, 26) # time.timezone is the offset related of the current timezone of the system print("Start: %s, timestamp: %s" % (start, timestamp)) if start != timestamp: assert start == (timestamp - time.timezone) # @pytest.mark.skip("To be completed... because the start test do not pass locally!") def test_get_start_of_day_tz_aware(self): """ Test function get_start_of_day and return the timestamp of begin of day :return: None """ now = time.localtime() tz_shift = time.timezone dst = now.tm_isdst print("Now: %s, timezone: %s, DST: %s" % (now, tz_shift, dst)) start = time.mktime((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0, 0, 0, -1)) print("Start: %s" % start) # Alignak returns the start of day ts in local time timestamp = alignak.daterange.get_start_of_day(now.tm_year, now.tm_mon, now.tm_mday) print("Timestamp: %s" % timestamp) # time.timezone is the offset related of the current timezone of the system if start != timestamp: assert start == (timestamp - time.timezone) def test_get_end_of_day(self): """ Test function get_end_of_day and return the timestamp of end of day :return: None """ now = time.localtime() start = time.mktime((2016, 8, 20, 23, 59, 59, 0, 0, now.tm_isdst)) timestamp = alignak.daterange.get_end_of_day(2016, 8, 20) print("Start: %s, timestamp: %s" % (start, timestamp)) # time.timezone is the offset related of the current timezone of the system if start != timestamp: assert start == (timestamp - time.timezone) def test_find_day_by_weekday_offset(self): """ Test function find_day_by_weekday_offset to get day number. 
In this case, 1 = thuesday and -1 = last thuesday of July 2010, so it's the 27 july 2010 :return: None """ ret = find_day_by_weekday_offset(2010, 7, 1, -1) assert 27 == ret def test_find_day_by_offset(self): """ Test function find_day_by_offset to get the day with offset. In this case, the last day number of july, so the 31th :return: None """ ret = find_day_by_offset(2015, 7, -1) assert 31 == ret ret = find_day_by_offset(2015, 7, 10) assert 10 == ret def test_calendardaterange_start_end_time(self): """ Test CalendarDaterange.get_start_and_end_time to get start and end date of date range :return: None """ local_offset = time.timezone - 3600 * time.daylight # TS below are for UTC local_hour_offset = local_offset / 3600 if local_hour_offset >= 0: local_hour_offset = "-%02d" % local_hour_offset else: local_hour_offset = "+%02d" % -local_hour_offset data = { '2015-07-20 01:50:00 %s' % local_hour_offset: { 'start': 1437868800 + local_offset, 'end': 1471737599 + local_offset }, '2015-07-26 01:50:00 %s' % local_hour_offset: { 'start': 1437868800 + local_offset, 'end': 1471737599 + local_offset }, '2016-01-01 01:50:00 %s' % local_hour_offset: { 'start': 1437868800 + local_offset, 'end': 1471737599 + local_offset }, '2016-08-21 01:50:00 %s' % local_hour_offset: { 'start': 1437868800 + local_offset, 'end': 1471737599 + local_offset }, } for date_now in data: with freeze_time(date_now, tz_offset=0): caldate = CalendarDaterange({ 'syear': 2015, 'smon': 7, 'smday': 26, 'swday': 0, 'swday_offset': 0, 'eyear': 2016, 'emon': 8, 'emday': 20, 'ewday': 0, 'ewday_offset': 0, 'skip_interval': 3, 'other': ''}) ret = caldate.get_start_and_end_time() print("* %s" % date_now) assert data[date_now]['start'] == ret[0] assert data[date_now]['end'] == ret[1] def test_standarddaterange_start_end_time(self): """ Test StandardDaterange.get_start_and_end_time to get start and end date of date range :return: None """ local_offset = time.timezone - 3600 * time.daylight # TS below are for UTC 
local_hour_offset = local_offset / 3600 if local_hour_offset >= 0: local_hour_offset = "-%02d" % local_hour_offset else: local_hour_offset = "+%02d" % -local_hour_offset data = {} for num in range(1, 3): data['2015-07-%02d 01:50:00 %s' % (num, local_hour_offset)] = { 'start': 1435881600 + local_offset, 'end': 1435967999 + local_offset } for num in range(4, 10): data['2015-07-%02d 01:50:00 %s' % (num, local_hour_offset)] = { 'start': 1436486400 + local_offset, 'end': 1436572799 + local_offset } for num in range(11, 17): data['2015-07-%02d 01:50:00 %s' % (num, local_hour_offset)] = { 'start': 1437091200 + local_offset, 'end': 1437177599 + local_offset } # Time from next friday morning to next friday night caldate = StandardDaterange({'day': 'friday', 'other': '00:00-24:00'}) for date_now in data: with freeze_time(date_now, tz_offset=0): # ret = caldate.get_start_and_end_time() # print("* %s" % date_now) # assert data[date_now]['start'] == ret[0] # assert data[date_now]['end'] == ret[1] start, end = caldate.get_start_and_end_time() print("-> res: %s (%s) - %s (%s)" % (start, type(start), end, type(end))) assert data[date_now]['start'] == start assert data[date_now]['end'] == end def test_monthweekdaydaterange_start_end_time(self): """ Test MonthWeekDayDaterange.get_start_and_end_time to get start and end date of date range :return: None """ data = {} local_offset = time.timezone - 3600 * time.daylight # TS below are for UTC local_hour_offset = local_offset / 3600 if local_hour_offset >= 0: local_hour_offset = "-%02d" % local_hour_offset else: local_hour_offset = "+%02d" % -local_hour_offset for num in range(1, 31): data['2015-07-%02d 01:50:00 %s' % (num, local_hour_offset)] = { 'start': 1436832000 + local_offset, 'end': 1440201599 + local_offset } for num in range(1, 21): data['2015-08-%02d 01:50:00 %s' % (num, local_hour_offset)] = { 'start': 1436832000 + local_offset, 'end': 1440201599 + local_offset } for num in range(22, 31): data['2015-08-%02d 01:50:00 %s ' % 
(num, local_hour_offset)] = { 'start': 1468281600 + local_offset, 'end': 1471651199 + local_offset } # 2nd tuesday of July 2015 => 14 # 3rd friday of August 2015 => 21 # next : 2nd tuesday of July 2016 => 12 # next 3rd friday of August 2016 => 19 params = {'syear': 2015, 'smon': 7, 'smday': 0, 'swday': 1, 'swday_offset': 2, 'eyear': 2015, 'emon': 8, 'emday': 0, 'ewday': 4, 'ewday_offset': 3, 'skip_interval': 0, 'other': ''} caldate = MonthWeekDayDaterange(params) for date_now in data: with freeze_time(date_now, tz_offset=0): ret = caldate.get_start_and_end_time() print("* %s" % date_now) assert data[date_now]['start'] == ret[0] assert data[date_now]['end'] == ret[1] def test_monthdatedaterange_start_end_time(self): """ Test MonthDateDaterange.get_start_and_end_time to get start and end date of date range :return: None """ local_offset = time.timezone - 3600 * time.daylight # TS below are for UTC local_hour_offset = local_offset / 3600 if local_hour_offset >= 0: local_hour_offset = "-%02d" % local_hour_offset else: local_hour_offset = "+%02d" % -local_hour_offset data = { '2015-07-20 00:50:00 %s' % local_hour_offset: { 'start': 1437868800 + local_offset, 'end': 1440115199 + local_offset }, '2015-07-26 00:50:00 %s' % local_hour_offset: { 'start': 1437868800 + local_offset, 'end': 1440115199 + local_offset }, '2015-08-28 00:50:00 %s' % local_hour_offset: { 'start': 1469491200 + local_offset, 'end': 1471737599 + local_offset }, '2016-01-01 00:50:00 %s' % local_hour_offset: { 'start': 1469491200 + local_offset, 'end': 1471737599 + local_offset }, } for date_now in data: with freeze_time(date_now, tz_offset=0): # Reset the initial parameters for every date else the test is broken (#1022! 
caldate = MonthDateDaterange({ 'syear': 0, 'smon': 7, 'smday': 26, 'swday': 0, 'swday_offset': 0, 'eyear': 0, 'emon': 8, 'emday': 20, 'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0, 'other': '' }) print("-----\nToday is: %s, UTC: %s, expected: [%s, %s]" % ( datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), data[date_now]['start'], data[date_now]['end'] )) print("MonthDateDaterange: %s" % caldate.__dict__) print("Date: %s: %s" % (date_now, data[date_now])) start, end = caldate.get_start_and_end_time() print("-> start: %s, end: %s" % (start, end)) assert data[date_now]['start'] == start assert data[date_now]['end'] == end # ret = caldate.get_start_and_end_time() # print("-> res: %s" % ret) # print("* %s / %s" % (type(ret[0]), type(ret[1]))) # print("* %s / %s" % (int(ret[0]), int(ret[1]))) # assert data[date_now]['start'] == int(ret[0]) # assert data[date_now]['end'] == int(ret[1]) def test_weekdaydaterange_start_end_time(self): """ Test WeekDayDaterange.get_start_and_end_time to get start and end date of date range :return: None """ local_offset = time.timezone - 3600 * time.daylight # TS below are for UTC local_hour_offset = local_offset / 3600 if local_hour_offset >= 0: local_hour_offset = "-%02d" % local_hour_offset else: local_hour_offset = "+%02d" % -local_hour_offset data = { '2015-07-07 01:50:00 %s' % local_hour_offset: { 'start': 1436745600 + local_offset, 'end': 1437523199 + local_offset }, '2015-07-20 01:50:00 %s' % local_hour_offset: { 'start': 1436745600 + local_offset, 'end': 1437523199 + local_offset }, '2015-07-24 01:50:00 %s' % local_hour_offset: { 'start': 1439164800 + local_offset, 'end': 1439942399 + local_offset }, '2015-08-02 01:50:00 %s' % local_hour_offset: { 'start': 1439164800 + local_offset, 'end': 1439942399 + local_offset }, } # second monday - third tuesday params = {'syear': 0, 'smon': 0, 'smday': 0, 'swday': 0, 'swday_offset': 2, 'eyear': 0, 'emon': 0, 'emday': 0, 
'ewday': 1, 'ewday_offset': 3, 'skip_interval': 0, 'other': ''} caldate = WeekDayDaterange(params) for date_now in data: with freeze_time(date_now, tz_offset=0): ret = caldate.get_start_and_end_time() print("* %s" % date_now) assert data[date_now]['start'] == ret[0] assert data[date_now]['end'] == ret[1] def test_monthdaydaterange_start_end_time(self): """ Test MonthDayDaterange.get_start_and_end_time to get start and end date of date range :return: None """ local_offset = time.timezone - 3600 * time.daylight # TS below are for UTC local_hour_offset = local_offset / 3600 if local_hour_offset >= 0: local_hour_offset = "-%02d" % local_hour_offset else: local_hour_offset = "+%02d" % -local_hour_offset data = { '2015-07-07 01:50:00 %s' % local_hour_offset: { 'start': 1438387200 + local_offset, 'end': 1438819199 + local_offset }, '2015-07-31 01:50:00 %s' % local_hour_offset: { 'start': 1438387200 + local_offset, 'end': 1438819199 + local_offset }, '2015-08-05 01:50:00 %s' % local_hour_offset: { 'start': 1438387200 + local_offset, 'end': 1438819199 + local_offset }, '2015-08-06 01:50:00 %s' % local_hour_offset: { 'start': 1441065600 + local_offset, 'end': 1441497599 + local_offset }, } # day -1 - 5 00:00-10:00 params = {'syear': 0, 'smon': 0, 'smday': 1, 'swday': 0, 'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': 5, 'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0, 'other': ''} caldate = MonthDayDaterange(params) for date_now in data: with freeze_time(date_now, tz_offset=0): ret = caldate.get_start_and_end_time() print("* %s" % date_now) assert data[date_now]['start'] == ret[0] assert data[date_now]['end'] == ret[1] def test_monthdaydaterange_start_end_time_negative(self): """ Test MonthDayDaterange.get_start_and_end_time to get start and end date of date range with negative values :return: None """ local_offset = time.timezone - 3600 * time.daylight # TS below are for UTC local_hour_offset = local_offset / 3600 if local_hour_offset >= 0: local_hour_offset = "-%02d" 
% local_hour_offset else: local_hour_offset = "+%02d" % -local_hour_offset data = { '2015-07-07 01:50:00 %s' % local_hour_offset: { 'start': 1438300800 + local_offset, 'end': 1438819199 + local_offset }, '2015-07-31 01:50:00 %s' % local_hour_offset: { 'start': 1438300800 + local_offset, 'end': 1438819199 + local_offset }, '2015-08-01 01:50:00 %s' % local_hour_offset: { 'start': 1438300800 + local_offset, 'end': 1438819199 + local_offset }, '2015-08-05 01:50:00 %s' % local_hour_offset: { 'start': 1438300800 + local_offset, 'end': 1438819199 + local_offset }, '2015-08-06 01:50:00 %s' % local_hour_offset: { 'start': 1440979200 + local_offset, 'end': 1441497599 + local_offset }, } # day -1 - 5 00:00-10:00 params = {'syear': 0, 'smon': 0, 'smday': -1, 'swday': 0, 'swday_offset': 0, 'eyear': 0, 'emon': 0, 'emday': 5, 'ewday': 0, 'ewday_offset': 0, 'skip_interval': 0, 'other': ''} caldate = MonthDayDaterange(params) for date_now in data: with freeze_time(date_now, tz_offset=0): ret = caldate.get_start_and_end_time() print("* %s" % date_now) assert data[date_now]['start'] == ret[0] assert data[date_now]['end'] == ret[1] def test_standarddaterange_is_correct(self): """ Test if time from next wednesday morning to next wednesday night is correct :return: None """ caldate = StandardDaterange({'day': 'wednesday', 'other': '00:00-24:00'}) assert caldate.is_correct() def test_monthweekdaydaterange_is_correct(self): """ Test if time from next wednesday morning to next wednesday night is correct :return: None """ params = {'syear': 2015, 'smon': 7, 'smday': 0, 'swday': 1, 'swday_offset': 2, 'eyear': 2015, 'emon': 8, 'emday': 0, 'ewday': 4, 'ewday_offset': 3, 'skip_interval': 0, 'other': ''} caldate = MonthWeekDayDaterange(params) assert caldate.is_correct() def test_resolve_daterange_case1(self): """ Test resolve daterange, case 1 :return: None """ timeperiod = Timeperiod({}) entry = '2015-07-26 - 2016-08-20 / 3 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, 
entry) assert 2015 == timeperiod.dateranges[0].syear assert 7 == timeperiod.dateranges[0].smon assert 26 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 2016 == timeperiod.dateranges[0].eyear assert 8 == timeperiod.dateranges[0].emon assert 20 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 3 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case2(self): """ Test resolve daterange, case 2 :return: None """ timeperiod = Timeperiod({}) entry = '2015-07-26 / 7 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 2015 == timeperiod.dateranges[0].syear assert 7 == timeperiod.dateranges[0].smon assert 26 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 2015 == timeperiod.dateranges[0].eyear assert 7 == timeperiod.dateranges[0].emon assert 26 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 7 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case3(self): """ Test resolve daterange, case 3 :return: None """ timeperiod = Timeperiod({}) entry = '2015-07-26 - 2016-08-20 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 2015 == timeperiod.dateranges[0].syear assert 7 == timeperiod.dateranges[0].smon assert 26 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 2016 == timeperiod.dateranges[0].eyear assert 8 == timeperiod.dateranges[0].emon assert 20 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == 
timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case4(self): """ Test resolve daterange, case 4 :return: None """ timeperiod = Timeperiod({}) entry = '2015-07-26 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 2015 == timeperiod.dateranges[0].syear assert 7 == timeperiod.dateranges[0].smon assert 26 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 2015 == timeperiod.dateranges[0].eyear assert 7 == timeperiod.dateranges[0].emon assert 26 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case5(self): """ Test resolve daterange, case 5 :return: None """ timeperiod = Timeperiod({}) entry = 'tuesday 1 october - friday 2 may / 6 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 10 == timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 1 == timeperiod.dateranges[0].swday assert 1 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 5 == timeperiod.dateranges[0].emon assert 0 == timeperiod.dateranges[0].emday assert 4 == timeperiod.dateranges[0].ewday assert 2 == timeperiod.dateranges[0].ewday_offset assert 6 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case6(self): """ Test resolve daterange, case 6 :return: None """ timeperiod = Timeperiod({}) entry = 'monday 4 - thursday 3 / 2 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == 
timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 4 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 0 == timeperiod.dateranges[0].emday assert 3 == timeperiod.dateranges[0].ewday assert 3 == timeperiod.dateranges[0].ewday_offset assert 2 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case7(self): """ Test resolve daterange, case 7 :return: None """ timeperiod = Timeperiod({}) entry = 'march 4 - july 3 / 2 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 3 == timeperiod.dateranges[0].smon assert 4 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 7 == timeperiod.dateranges[0].emon assert 3 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 2 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case8(self): """ Test resolve daterange, case 8 :return: None """ timeperiod = Timeperiod({}) entry = 'day 4 - day 3 / 2 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 4 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 3 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 2 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' 
== timeperiod.dateranges[0].other def test_resolve_daterange_case9(self): """ Test resolve daterange, case 9 :return: None """ timeperiod = Timeperiod({}) entry = 'friday 2 - 15 / 5 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 4 == timeperiod.dateranges[0].swday assert 2 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 0 == timeperiod.dateranges[0].emday assert 4 == timeperiod.dateranges[0].ewday assert 15 == timeperiod.dateranges[0].ewday_offset assert 5 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case10(self): """ Test resolve daterange, case 10 :return: None """ timeperiod = Timeperiod({}) entry = 'july 2 - 15 / 5 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 7 == timeperiod.dateranges[0].smon assert 2 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 7 == timeperiod.dateranges[0].emon assert 15 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 5 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case11(self): """ Test resolve daterange, case 11 :return: None """ timeperiod = Timeperiod({}) entry = 'day 8 - 15 / 5 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 8 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == 
timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 15 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 5 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case12(self): """ Test resolve daterange, case 12 :return: None """ timeperiod = Timeperiod({}) entry = 'tuesday 3 july - friday 2 september 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 7 == timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 1 == timeperiod.dateranges[0].swday assert 3 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 9 == timeperiod.dateranges[0].emon assert 0 == timeperiod.dateranges[0].emday assert 4 == timeperiod.dateranges[0].ewday assert 2 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case13(self): """ Test resolve daterange, case 13 :return: None """ timeperiod = Timeperiod({}) entry = 'friday 1 - 3 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 4 == timeperiod.dateranges[0].swday assert 1 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 0 == timeperiod.dateranges[0].emday assert 4 == timeperiod.dateranges[0].ewday assert 3 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case14(self): """ Test resolve daterange, case 14 
:return: None """ timeperiod = Timeperiod({}) entry = 'july -10 - -1 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 7 == timeperiod.dateranges[0].smon assert -10 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 7 == timeperiod.dateranges[0].emon assert -1 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case15(self): """ Test resolve daterange, case 15 :return: None """ timeperiod = Timeperiod({}) entry = 'day 1 - 15 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 1 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 15 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case16(self): """ Test resolve daterange, case 16 :return: None """ timeperiod = Timeperiod({}) entry = 'monday 3 - thursday 4 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 3 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 0 == 
timeperiod.dateranges[0].emday assert 3 == timeperiod.dateranges[0].ewday assert 4 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case17(self): """ Test resolve daterange, case 17 :return: None """ timeperiod = Timeperiod({}) entry = 'april 10 - may 15 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 4 == timeperiod.dateranges[0].smon assert 10 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 5 == timeperiod.dateranges[0].emon assert 15 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case18(self): """ Test resolve daterange, case 18 :return: None """ timeperiod = Timeperiod({}) entry = 'day 10 - day 15 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 10 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 15 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case19(self): """ Test resolve daterange, case 19 :return: None """ timeperiod = Timeperiod({}) entry = 'tuesday 3 november 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) 
assert 0 == timeperiod.dateranges[0].syear assert 11 == timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 1 == timeperiod.dateranges[0].swday assert 3 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 11 == timeperiod.dateranges[0].emon assert 0 == timeperiod.dateranges[0].emday assert 1 == timeperiod.dateranges[0].ewday assert 3 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case20(self): """ Test resolve daterange, case 20 :return: None """ timeperiod = Timeperiod({}) entry = 'tuesday 3 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 0 == timeperiod.dateranges[0].smday assert 1 == timeperiod.dateranges[0].swday assert 3 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 0 == timeperiod.dateranges[0].emday assert 1 == timeperiod.dateranges[0].ewday assert 3 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case21(self): """ Test resolve daterange, case 21 :return: None """ timeperiod = Timeperiod({}) entry = 'may 3 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 5 == timeperiod.dateranges[0].smon assert 3 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 5 == timeperiod.dateranges[0].emon assert 3 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 0 == 
timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case22(self): """ Test resolve daterange, case 22 :return: None """ timeperiod = Timeperiod({}) entry = 'day 3 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 0 == timeperiod.dateranges[0].syear assert 0 == timeperiod.dateranges[0].smon assert 3 == timeperiod.dateranges[0].smday assert 0 == timeperiod.dateranges[0].swday assert 0 == timeperiod.dateranges[0].swday_offset assert 0 == timeperiod.dateranges[0].eyear assert 0 == timeperiod.dateranges[0].emon assert 3 == timeperiod.dateranges[0].emday assert 0 == timeperiod.dateranges[0].ewday assert 0 == timeperiod.dateranges[0].ewday_offset assert 0 == timeperiod.dateranges[0].skip_interval assert '00:00-24:00' == timeperiod.dateranges[0].other def test_resolve_daterange_case23(self): """ Test resolve daterange, case 23 :return: None """ timeperiod = Timeperiod({}) entry = 'sunday 00:00-24:00' timeperiod.resolve_daterange(timeperiod.dateranges, entry) assert 'sunday' == timeperiod.dateranges[0].day
class TestDateRanges(AlignakTest): ''' This class test dataranges ''' def setUp(self): pass def test_get_start_of_day(self): ''' Test function get_start_of_day and return the timestamp of begin of day :return: None ''' pass def test_get_start_of_day_tz_aware(self): ''' Test function get_start_of_day and return the timestamp of begin of day :return: None ''' pass def test_get_end_of_day(self): ''' Test function get_end_of_day and return the timestamp of end of day :return: None ''' pass def test_find_day_by_weekday_offset(self): ''' Test function find_day_by_weekday_offset to get day number. In this case, 1 = thuesday and -1 = last thuesday of July 2010, so it's the 27 july 2010 :return: None ''' pass def test_find_day_by_offset(self): ''' Test function find_day_by_offset to get the day with offset. In this case, the last day number of july, so the 31th :return: None ''' pass def test_calendardaterange_start_end_time(self): ''' Test CalendarDaterange.get_start_and_end_time to get start and end date of date range :return: None ''' pass def test_standarddaterange_start_end_time(self): ''' Test StandardDaterange.get_start_and_end_time to get start and end date of date range :return: None ''' pass def test_monthweekdaydaterange_start_end_time(self): ''' Test MonthWeekDayDaterange.get_start_and_end_time to get start and end date of date range :return: None ''' pass def test_monthdatedaterange_start_end_time(self): ''' Test MonthDateDaterange.get_start_and_end_time to get start and end date of date range :return: None ''' pass def test_weekdaydaterange_start_end_time(self): ''' Test WeekDayDaterange.get_start_and_end_time to get start and end date of date range :return: None ''' pass def test_monthdaydaterange_start_end_time(self): ''' Test MonthDayDaterange.get_start_and_end_time to get start and end date of date range :return: None ''' pass def test_monthdaydaterange_start_end_time_negative(self): ''' Test MonthDayDaterange.get_start_and_end_time to get start and end 
date of date range with negative values :return: None ''' pass def test_standarddaterange_is_correct(self): ''' Test if time from next wednesday morning to next wednesday night is correct :return: None ''' pass def test_monthweekdaydaterange_is_correct(self): ''' Test if time from next wednesday morning to next wednesday night is correct :return: None ''' pass def test_resolve_daterange_case1(self): ''' Test resolve daterange, case 1 :return: None ''' pass def test_resolve_daterange_case2(self): ''' Test resolve daterange, case 2 :return: None ''' pass def test_resolve_daterange_case3(self): ''' Test resolve daterange, case 3 :return: None ''' pass def test_resolve_daterange_case4(self): ''' Test resolve daterange, case 4 :return: None ''' pass def test_resolve_daterange_case5(self): ''' Test resolve daterange, case 5 :return: None ''' pass def test_resolve_daterange_case6(self): ''' Test resolve daterange, case 6 :return: None ''' pass def test_resolve_daterange_case7(self): ''' Test resolve daterange, case 7 :return: None ''' pass def test_resolve_daterange_case8(self): ''' Test resolve daterange, case 8 :return: None ''' pass def test_resolve_daterange_case9(self): ''' Test resolve daterange, case 9 :return: None ''' pass def test_resolve_daterange_case10(self): ''' Test resolve daterange, case 10 :return: None ''' pass def test_resolve_daterange_case11(self): ''' Test resolve daterange, case 11 :return: None ''' pass def test_resolve_daterange_case12(self): ''' Test resolve daterange, case 12 :return: None ''' pass def test_resolve_daterange_case13(self): ''' Test resolve daterange, case 13 :return: None ''' pass def test_resolve_daterange_case14(self): ''' Test resolve daterange, case 14 :return: None ''' pass def test_resolve_daterange_case15(self): ''' Test resolve daterange, case 15 :return: None ''' pass def test_resolve_daterange_case16(self): ''' Test resolve daterange, case 16 :return: None ''' pass def test_resolve_daterange_case17(self): ''' Test 
resolve daterange, case 17 :return: None ''' pass def test_resolve_daterange_case18(self): ''' Test resolve daterange, case 18 :return: None ''' pass def test_resolve_daterange_case19(self): ''' Test resolve daterange, case 19 :return: None ''' pass def test_resolve_daterange_case20(self): ''' Test resolve daterange, case 20 :return: None ''' pass def test_resolve_daterange_case21(self): ''' Test resolve daterange, case 21 :return: None ''' pass def test_resolve_daterange_case22(self): ''' Test resolve daterange, case 22 :return: None ''' pass def test_resolve_daterange_case23(self): ''' Test resolve daterange, case 23 :return: None ''' pass
39
38
22
2
17
4
2
0.23
1
11
7
0
38
0
38
93
901
107
653
149
614
148
517
149
478
6
2
2
61
3,973
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_daemon_start.py
tests.test_daemon_start.Test_Reactionner_Start
class Test_Reactionner_Start(TemplateDaemonStart, AlignakTest): def setUp(self): super(Test_Reactionner_Start, self).setUp() daemon_cls = Reactionner daemon_name = 'my_reactionner'
class Test_Reactionner_Start(TemplateDaemonStart, AlignakTest): def setUp(self): pass
2
0
2
0
2
0
1
0
2
1
0
0
1
0
1
73
6
1
5
4
3
0
5
4
3
1
2
0
1
3,974
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_aa_properties_default.py
tests.test_aa_properties_default.TestHostescalation
class TestHostescalation(PropertiesTester, AlignakTest): unused_props = [] without_default = [ 'host_name', 'hostgroup_name', 'first_notification', 'last_notification', 'first_notification_time', 'last_notification_time', ] properties = dict([ ('imported_from', 'alignak-self'), ('use', []), ('register', True), ('definition_order', 100), ('name', ''), ('notification_interval', 30), ('escalation_period', ''), ('escalation_options', ['d','x','r']), ('contacts', []), ('contact_groups', []), ]) def setUp(self): super(TestHostescalation, self).setUp() from alignak.objects.hostescalation import Hostescalation self.item = Hostescalation({}, parsing=True)
class TestHostescalation(PropertiesTester, AlignakTest): def setUp(self): pass
2
0
4
0
4
0
1
0
2
2
1
0
1
1
1
60
27
4
23
7
20
0
8
7
5
1
2
0
1
3,975
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_daemon_start.py
tests.test_daemon_start.Test_Broker_Start
class Test_Broker_Start(TemplateDaemonStart, AlignakTest): def setUp(self): super(Test_Broker_Start, self).setUp() daemon_cls = Broker daemon_name = 'my_broker'
class Test_Broker_Start(TemplateDaemonStart, AlignakTest): def setUp(self): pass
2
0
2
0
2
0
1
0
2
1
0
0
1
0
1
73
6
1
5
4
3
0
5
4
3
1
2
0
1
3,976
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/property.py
alignak.property.PathProp
class PathProp(StringProp): """ A string property representing a "running" (== VAR) file path """
class PathProp(StringProp): ''' A string property representing a "running" (== VAR) file path ''' pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
4
2
0
1
1
0
1
1
1
0
0
3
0
0
3,977
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/reactionnerlink.py
alignak.objects.reactionnerlink.ReactionnerLinks
class ReactionnerLinks(SatelliteLinks): # (Items): """ Class to manage list of ReactionnerLink. ReactionnerLinks is used to regroup all reactionners """ name_property = "reactionner_name" inner_class = ReactionnerLink
class ReactionnerLinks(SatelliteLinks): ''' Class to manage list of ReactionnerLink. ReactionnerLinks is used to regroup all reactionners ''' pass
1
1
0
0
0
0
0
1.67
1
0
0
0
0
0
0
48
7
0
3
3
2
5
3
3
2
0
3
0
0
3,978
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/realm.py
alignak.objects.realm.Realm
class Realm(Itemgroup): """Realm class is used to implement realm. It is basically a group of Hosts assigned to a specific Scheduler/Poller (other daemon are optional) """ my_type = 'realm' my_name_property = "%s_name" % my_type members_property = "members" group_members_property = "realm_members" properties = Itemgroup.properties.copy() properties.update({ 'realm_name': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'alias': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'realm_members': ListProp(default=[], split_on_comma=True), 'group_members': ListProp(default=[], split_on_comma=True), 'higher_realms': ListProp(default=[], split_on_comma=True), 'default': BoolProp(default=False) }) running_properties = Itemgroup.running_properties.copy() running_properties.update({ # Indicate if some only passively or actively checks host exist in the realm 'passively_checked_hosts': BoolProp(default=None), 'actively_checked_hosts': BoolProp(default=None), # Those lists contain only the uuid of the satellite link, not the whole object! 
'arbiters': ListProp(default=[]), 'schedulers': ListProp(default=[]), 'brokers': ListProp(default=[]), 'pollers': ListProp(default=[]), 'reactionners': ListProp(default=[]), 'receivers': ListProp(default=[]), 'potential_schedulers': ListProp(default=[]), 'potential_brokers': ListProp(default=[]), 'potential_pollers': ListProp(default=[]), 'potential_reactionners': ListProp(default=[]), 'potential_receivers': ListProp(default=[]), # Once configuration is prepared, the count of the hosts in the realm 'hosts_count': IntegerProp(default=0), 'packs': DictProp(default={}), 'parts': DictProp(default={}), # Realm level in the realms hierarchy 'level': IntegerProp(default=-1), # All the sub realms (children and grand-children) 'all_sub_members': ListProp(default=[]), 'all_sub_members_names': ListProp(default=[]), }) macros = { 'REALMNAME': 'realm_name', 'REALMDEFAULT': 'default', 'REALMMEMBERS': 'members', 'REALMREALM_MEMBERS': 'realm_members', 'REALMGROUP_MEMBERS': 'group_members', 'REALMHOSTS_COUNT': 'hosts_count', } def __init__(self, params, parsing=True): super(Realm, self).__init__(params, parsing) self.level = -1 self.all_sub_members = [] self.all_sub_members_names = [] self.fill_default() # Define a packs list for the configuration preparation self.packs = [] # Once the configuration got prepared, packs becomes a dictionary! 
# packs is a dictionary indexed with the configuration part # number and containing the list of hosts # List of satellites related to the realm self.to_satellites = { 'reactionner': {}, 'poller': {}, 'broker': {}, 'receiver': {} } # List of satellites that need a configuration dispatch self.to_satellites_need_dispatch = { 'reactionner': {}, 'poller': {}, 'broker': {}, 'receiver': {} } # List of satellites with their managed configuration self.to_satellites_managed_by = { 'reactionner': {}, 'poller': {}, 'broker': {}, 'receiver': {} } # Attributes depending of the satellite type for sat_type in ['arbiter', 'scheduler', 'reactionner', 'poller', 'broker', 'receiver']: # Minimum is to have one satellite setattr(self, "nb_%ss" % sat_type, 0) setattr(self, 'potential_%ss' % sat_type, []) def __str__(self): res = '<Realm %s (%d)' % (self.get_name(), getattr(self, 'level', None)) if self.realm_members: res = res + ', %d sub-realms: %s' \ % (len(self.realm_members), ', '.join([str(s) for s in self.realm_members])) if self.all_sub_members_names: res = res + ', %d all sub-realms: %s' \ % (len(self.all_sub_members_names), ', '.join([str(s) for s in self.all_sub_members_names])) if getattr(self, 'hosts_count', None): res = res + ', %d hosts' % self.hosts_count if getattr(self, 'parts', None): res = res + ', %d parts' % len(self.parts) if getattr(self, 'packs', None): res = res + ', %d packs' % len(self.packs) return res + '/>' __repr__ = __str__ @property def name(self): """Get the realm name""" return self.get_name() def serialize(self, no_json=True, printing=False): """This function serialize into a simple dict object. 
It is used when transferring data to other daemons over the network (http) Here we directly return all attributes :return: :rtype: dict """ return { "uuid": self.uuid, "realm_name": self.get_name(), "level": self.level } def add_group_members(self, members): """Add a new group member to the groups list :param members: member name :type members: str :return: None """ if not isinstance(members, list): members = [members] if not getattr(self, 'group_members', None): self.group_members = members else: self.group_members.extend(members) def prepare_satellites(self, satellites): """Update the following attributes of a realm:: * nb_*satellite type*s * self.potential_*satellite type*s (satellite types are scheduler, reactionner, poller, broker and receiver) :param satellites: dict of SatelliteLink objects :type satellites: dict :return: None """ for sat_type in ["scheduler", "reactionner", "poller", "broker", "receiver"]: # We get potential TYPE at realm level first for sat_link_uuid in getattr(self, "%ss" % sat_type): if sat_link_uuid not in satellites: continue sat_link = satellites[sat_link_uuid] # Found our declared satellite in the provided satellites if sat_link.active and not sat_link.spare: # Generic increment : realm.nb_TYPE += 1 setattr(self, "nb_%ss" % sat_type, getattr(self, "nb_%ss" % sat_type) + 1) break else: self.add_error("Realm %s, satellite %s declared in the realm is not found " "in the allowed satellites!" 
% (self.name, sat_link.name)) logger.error("Satellite %s declared in the realm %s not found " "in the allowed satellites!", sat_link.name, self.name) logger.info(" Realm %s: (in/potential) (schedulers:%d/%d) (pollers:%d/%d) " "(reactionners:%d/%d) (brokers:%d/%d) (receivers:%d/%d)", self.name, self.nb_schedulers, len(self.potential_schedulers), self.nb_pollers, len(self.potential_pollers), self.nb_reactionners, len(self.potential_reactionners), self.nb_brokers, len(self.potential_brokers), self.nb_receivers, len(self.potential_receivers)) def get_realms_by_explosion(self, realms): """Get all members of this realm including members of sub-realms on multi-levels :param realms: realms list, used to look for a specific one :type realms: alignak.objects.realm.Realms :return: list of members and add realm to realm_members attribute :rtype: list """ # If rec_tag is already set, then we detected a loop in the realms hierarchy! if getattr(self, 'rec_tag', False): self.add_error("Error: there is a loop in the realm definition %s" % self.get_name()) return None # Ok, not in a loop, we tag the realm and parse its members self.rec_tag = True # Order realm members list by name self.realm_members = sorted(self.realm_members) for member in self.realm_members: realm = realms.find_by_name(member) if not realm: self.add_unknown_members(member) continue children = realm.get_realms_by_explosion(realms) if children is None: # We got a loop in our children definition self.all_sub_members = [] self.realm_members = [] return None # Return the list of all unique members return self.all_sub_members def set_level(self, level, realms): """Set the realm level in the realms hierarchy :return: None """ self.level = level if not self.level: logger.info("- %s", self.get_name()) else: logger.info(" %s %s", '+' * self.level, self.get_name()) self.all_sub_members = [] self.all_sub_members_names = [] for child in sorted(self.realm_members): child = realms.find_by_name(child) if not child: continue 
self.all_sub_members.append(child.uuid) self.all_sub_members_names.append(child.get_name()) grand_children = child.set_level(self.level + 1, realms) for grand_child in grand_children: if grand_child in self.all_sub_members_names: continue grand_child = realms.find_by_name(grand_child) if grand_child: self.all_sub_members_names.append(grand_child.get_name()) self.all_sub_members.append(grand_child.uuid) return self.all_sub_members_names def get_all_subs_satellites_by_type(self, sat_type, realms): """Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list """ res = copy.copy(getattr(self, sat_type)) for member in self.all_sub_members: res.extend(realms[member].get_all_subs_satellites_by_type(sat_type, realms)) return res def get_satellites_by_type(self, s_type): """Generic function to access one of the satellite attribute ie : self.pollers, self.reactionners ... :param s_type: satellite type wanted :type s_type: str :return: self.*type*s :rtype: list """ if hasattr(self, s_type + 's'): return getattr(self, s_type + 's') logger.debug("[realm %s] do not have this kind of satellites: %s", self.name, s_type) return [] def get_potential_satellites_by_type(self, satellites, s_type): """Generic function to access one of the potential satellite attribute ie : self.potential_pollers, self.potential_reactionners ... 
:param satellites: list of SatelliteLink objects :type satellites: SatelliteLink list :param s_type: satellite type wanted :type s_type: str :return: self.potential_*type*s :rtype: list """ if not hasattr(self, 'potential_' + s_type + 's'): logger.debug("[realm %s] do not have this kind of satellites: %s", self.name, s_type) return [] matching_satellites = [] for sat_link in satellites: if sat_link.uuid in getattr(self, s_type + 's'): matching_satellites.append(sat_link) if matching_satellites: logger.debug("- found %ss: %s", s_type, matching_satellites) return matching_satellites for sat_link in satellites: if sat_link.uuid in getattr(self, 'potential_' + s_type + 's'): matching_satellites.append(sat_link) # Do not limit to one satellite! # break logger.debug("- potential %ss: %s", s_type, matching_satellites) return matching_satellites def get_nb_of_must_have_satellites(self, s_type): """Generic function to access one of the number satellite attribute ie : self.nb_pollers, self.nb_reactionners ... :param s_type: satellite type wanted :type s_type: str :return: self.nb_*type*s :rtype: int """ if hasattr(self, 'nb_' + s_type + 's'): return getattr(self, 'nb_' + s_type + 's') logger.debug("[realm %s] do not have this kind of satellites: %s", self.name, s_type) return 0 def get_links_for_a_broker(self, pollers, reactionners, receivers, realms, manage_sub_realms=False): """Get a configuration dictionary with pollers, reactionners and receivers links for a broker :param pollers: pollers :type pollers: :param reactionners: reactionners :type reactionners: :param receivers: receivers :type receivers: :param realms: realms :type realms: :param manage_sub_realms: :type manage_sub_realms: True if the borker manages sub realms :return: dict containing pollers, reactionners and receivers links (key is satellite id) :rtype: dict """ # Create void satellite links cfg = { 'pollers': {}, 'reactionners': {}, 'receivers': {}, } # Our self.daemons are only identifiers... 
that we use to fill the satellite links for poller_id in self.pollers: poller = pollers[poller_id] cfg['pollers'][poller.uuid] = poller.give_satellite_cfg() for reactionner_id in self.reactionners: reactionner = reactionners[reactionner_id] cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg() for receiver_id in self.receivers: receiver = receivers[receiver_id] cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg() # If the broker manages sub realms, fill the satellite links... if manage_sub_realms: # Now pollers for poller_id in self.get_all_subs_satellites_by_type('pollers', realms): poller = pollers[poller_id] cfg['pollers'][poller.uuid] = poller.give_satellite_cfg() # Now reactionners for reactionner_id in self.get_all_subs_satellites_by_type('reactionners', realms): reactionner = reactionners[reactionner_id] cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg() # Now receivers for receiver_id in self.get_all_subs_satellites_by_type('receivers', realms): receiver = receivers[receiver_id] cfg['receivers'][receiver.uuid] = receiver.give_satellite_cfg() return cfg def get_links_for_a_scheduler(self, pollers, reactionners, brokers): """Get a configuration dictionary with pollers, reactionners and brokers links for a scheduler :return: dict containing pollers, reactionners and brokers links (key is satellite id) :rtype: dict """ # Create void satellite links cfg = { 'pollers': {}, 'reactionners': {}, 'brokers': {}, } # Our self.daemons are only identifiers... 
that we use to fill the satellite links try: for poller in self.pollers + self.get_potential_satellites_by_type(pollers, "poller"): if poller in pollers: poller = pollers[poller] cfg['pollers'][poller.uuid] = poller.give_satellite_cfg() for reactionner in self.reactionners + self.get_potential_satellites_by_type( reactionners, "reactionner"): if reactionner in reactionners: reactionner = reactionners[reactionner] cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg() for broker in self.brokers + self.get_potential_satellites_by_type(brokers, "broker"): if broker in brokers: broker = brokers[broker] cfg['brokers'][broker.uuid] = broker.give_satellite_cfg() except Exception as exp: # pylint: disable=broad-except logger.exception("realm.get_links_for_a_scheduler: %s", exp) # for poller in self.get_potential_satellites_by_type(pollers, "poller"): # logger.info("Poller: %s", poller) # cfg['pollers'][poller.uuid] = poller.give_satellite_cfg() # # for reactionner in self.get_potential_satellites_by_type(reactionners, "reactionner"): # cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg() # # for broker in self.get_potential_satellites_by_type(brokers, "broker"): # cfg['brokers'][broker.uuid] = broker.give_satellite_cfg() return cfg
class Realm(Itemgroup): '''Realm class is used to implement realm. It is basically a group of Hosts assigned to a specific Scheduler/Poller (other daemon are optional) ''' def __init__(self, params, parsing=True): pass def __str__(self): pass @property def name(self): '''Get the realm name''' pass def serialize(self, no_json=True, printing=False): '''This function serialize into a simple dict object. It is used when transferring data to other daemons over the network (http) Here we directly return all attributes :return: :rtype: dict ''' pass def add_group_members(self, members): '''Add a new group member to the groups list :param members: member name :type members: str :return: None ''' pass def prepare_satellites(self, satellites): '''Update the following attributes of a realm:: * nb_*satellite type*s * self.potential_*satellite type*s (satellite types are scheduler, reactionner, poller, broker and receiver) :param satellites: dict of SatelliteLink objects :type satellites: dict :return: None ''' pass def get_realms_by_explosion(self, realms): '''Get all members of this realm including members of sub-realms on multi-levels :param realms: realms list, used to look for a specific one :type realms: alignak.objects.realm.Realms :return: list of members and add realm to realm_members attribute :rtype: list ''' pass def set_level(self, level, realms): '''Set the realm level in the realms hierarchy :return: None ''' pass def get_all_subs_satellites_by_type(self, sat_type, realms): '''Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list ''' pass def get_satellites_by_type(self, s_type): '''Generic function to access one of the satellite attribute ie : self.pollers, self.reactionners ... 
:param s_type: satellite type wanted :type s_type: str :return: self.*type*s :rtype: list ''' pass def get_potential_satellites_by_type(self, satellites, s_type): '''Generic function to access one of the potential satellite attribute ie : self.potential_pollers, self.potential_reactionners ... :param satellites: list of SatelliteLink objects :type satellites: SatelliteLink list :param s_type: satellite type wanted :type s_type: str :return: self.potential_*type*s :rtype: list ''' pass def get_nb_of_must_have_satellites(self, s_type): '''Generic function to access one of the number satellite attribute ie : self.nb_pollers, self.nb_reactionners ... :param s_type: satellite type wanted :type s_type: str :return: self.nb_*type*s :rtype: int ''' pass def get_links_for_a_broker(self, pollers, reactionners, receivers, realms, manage_sub_realms=False): '''Get a configuration dictionary with pollers, reactionners and receivers links for a broker :param pollers: pollers :type pollers: :param reactionners: reactionners :type reactionners: :param receivers: receivers :type receivers: :param realms: realms :type realms: :param manage_sub_realms: :type manage_sub_realms: True if the borker manages sub realms :return: dict containing pollers, reactionners and receivers links (key is satellite id) :rtype: dict ''' pass def get_links_for_a_scheduler(self, pollers, reactionners, brokers): '''Get a configuration dictionary with pollers, reactionners and brokers links for a scheduler :return: dict containing pollers, reactionners and brokers links (key is satellite id) :rtype: dict ''' pass
16
13
26
4
14
8
4
0.46
1
4
0
0
14
10
14
58
466
69
272
62
255
126
169
59
154
8
4
3
59
3,979
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/realm.py
alignak.objects.realm.Realms
class Realms(Itemgroups): """Realms manage a list of Realm objects, used for parsing configuration """ inner_class = Realm def __init__(self, items, index_items=True, parsing=True): super(Realms, self).__init__(items, index_items, parsing) self.default = None def linkify(self): """The realms linkify is done during the default realms/satellites initialization in the Config class. This functione only finishes the process by setting the realm level property according to the realm position in the hierarchy. All ` level` 0 realms are main realms that have their own hierarchy. :return: None """ logger.info("Known realms:") for realm in self: for tmp_realm in self: # Ignore if it is me... if tmp_realm == realm: continue # Ignore if I am a sub realm of another realm if realm.get_name() in tmp_realm.realm_members: break else: # This realm is not in the children of any realm realm.level = 0 realm.set_level(0, self) def explode(self): """Explode realms with each realm_members and higher_realms to get all the realms sub realms. 
:return: None """ # Manage higher realms where defined for realm in [tmp_realm for tmp_realm in self if tmp_realm.higher_realms]: for parent in realm.higher_realms: higher_realm = self.find_by_name(parent) if higher_realm: # Add the realm to its parent realm members higher_realm.realm_members.append(realm.get_name()) for realm in self: # Set a recursion tag to protect against loop for tmp_realm in self: tmp_realm.rec_tag = False realm.get_realms_by_explosion(self) # Clean the recursion tag for tmp_realm in self: del tmp_realm.rec_tag def get_default(self, check=False): """Get the default realm :param check: check correctness if True :type check: bool :return: Default realm of Alignak configuration :rtype: alignak.objects.realm.Realm | None """ found = [] for realm in sorted(self, key=lambda r: r.level): if getattr(realm, 'default', False): found.append(realm) if not found: # Retain as default realm the first realm in name alphabetical order found_names = sorted([r.get_name() for r in self]) if not found_names: self.add_error("No realm is defined in this configuration! " "This should not be possible!") return None default_realm_name = found_names[0] default_realm = self.find_by_name(default_realm_name) default_realm.default = True found.append(default_realm) if check: self.add_error("No realm is defined as the default one! " "I set %s as the default realm" % default_realm_name) default_realm = found[0] if len(found) > 1: # Retain as default realm the first so-called default realms in name alphabetical order found_names = sorted([r.get_name() for r in found]) default_realm_name = found_names[0] default_realm = self.find_by_name(default_realm_name) # Set all found realms as non-default realms for realm in found: if realm.get_name() != default_realm_name: realm.default = False if check: self.add_warning("More than one realm is defined as the default one: %s. " "I set %s as the default realm." 
% (','.join(found_names), default_realm_name)) self.default = default_realm return default_realm def prepare_satellites(self, satellites): """Init the following attributes for each realm:: * to_satellites (with *satellite type* keys) * to_satellites_need_dispatch (with *satellite type* keys) * to_satellites_managed_by (with *satellite type* keys) * nb_*satellite type*s * self.potential_*satellite type*s (satellite type are reactionner, poller, broker and receiver) :param satellites: (schedulers, brokers, reactionners, pollers, receivers) :type satellites: alignak.object.satelittelink.SatelliteLink :return: None """ for realm in self: realm.prepare_satellites(satellites)
class Realms(Itemgroups): '''Realms manage a list of Realm objects, used for parsing configuration ''' def __init__(self, items, index_items=True, parsing=True): pass def linkify(self): '''The realms linkify is done during the default realms/satellites initialization in the Config class. This functione only finishes the process by setting the realm level property according to the realm position in the hierarchy. All ` level` 0 realms are main realms that have their own hierarchy. :return: None ''' pass def explode(self): '''Explode realms with each realm_members and higher_realms to get all the realms sub realms. :return: None ''' pass def get_default(self, check=False): '''Get the default realm :param check: check correctness if True :type check: bool :return: Default realm of Alignak configuration :rtype: alignak.objects.realm.Realm | None ''' pass def prepare_satellites(self, satellites): '''Init the following attributes for each realm:: * to_satellites (with *satellite type* keys) * to_satellites_need_dispatch (with *satellite type* keys) * to_satellites_managed_by (with *satellite type* keys) * nb_*satellite type*s * self.potential_*satellite type*s (satellite type are reactionner, poller, broker and receiver) :param satellites: (schedulers, brokers, reactionners, pollers, receivers) :type satellites: alignak.object.satelittelink.SatelliteLink :return: None ''' pass
6
5
23
3
12
8
5
0.63
1
1
0
0
5
1
5
51
126
23
63
19
57
40
59
19
53
10
3
3
25
3,980
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/receiverlink.py
alignak.objects.receiverlink.ReceiverLink
class ReceiverLink(SatelliteLink): """ Class to manage the receiver information """ my_type = 'receiver' my_name_property = "%s_name" % my_type properties = SatelliteLink.properties.copy() properties.update({ 'type': StringProp(default='receiver', fill_brok=[FULL_STATUS], to_send=True), 'receiver_name': StringProp(default='', fill_brok=[FULL_STATUS], to_send=True), 'port': IntegerProp(default=7772, fill_brok=[FULL_STATUS], to_send=True), })
class ReceiverLink(SatelliteLink): ''' Class to manage the receiver information ''' pass
1
1
0
0
0
0
0
0.25
1
0
0
0
0
0
0
70
16
1
12
4
11
3
5
4
4
0
4
0
0
3,981
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/receiverlink.py
alignak.objects.receiverlink.ReceiverLinks
class ReceiverLinks(SatelliteLinks): """ Class to manage list of ReceiverLink. ReceiverLinks is used to regroup all receivers """ name_property = "receiver_name" inner_class = ReceiverLink
class ReceiverLinks(SatelliteLinks): ''' Class to manage list of ReceiverLink. ReceiverLinks is used to regroup all receivers ''' pass
1
1
0
0
0
0
0
1.33
1
0
0
0
0
0
0
48
7
0
3
3
2
4
3
3
2
0
3
0
0
3,982
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/property.py
alignak.property.Property
class Property(object): # pylint: disable=too-many-instance-attributes """Base class for all properties. Same semantic for all subclasses (except UnusedProp): The property is required if, and only if, the default value is `None`. """ def __init__(self, default=NONE_OBJECT, class_inherit=None, # pylint: disable=R0913 unmanaged=False, _help='', no_slots=False, fill_brok=None, brok_transformation=None, retention=False, retention_preparation=None, retention_restoration=None, to_send=False, override=False, managed=True, split_on_comma=True, keep_empty=False, merging='uniq', special=False): # pylint: disable=too-many-locals """ `default`: the default value to be used if this property is not set. If default is None, this property is required. `class_inherit`: List of 2-tuples, (Service, 'blabla'): must set this property to the Service class with name blabla. if (Service, None): must set this property to the Service class with same name `unmanaged`: .... `help`: usage text `no_slots`: do not take this property for __slots__ `fill_brok`: if set, send to broker. There are two categories: FULL_STATUS for initial and update status, CHECK_RESULT for check results `retention`: if set, the property will be saved in the retention files `retention_preparation`: function name, if set, this function will be called with the property before saving the date to the retention `retention_restoration`: function name, if set, this function will be called with the restored retention data `split_on_comma`: indicates that list property value should not be split on comma delimiter (values may contain commas that we want to keep). Only for the initial call: brok_transformation: if set, will call the function with the value of the property when flattening data is necessary (like realm_name instead of the realm object). 
override: for scheduler, if the property must override the value of the configuration we send it managed: property that is managed in Nagios but not in Alignak merging: for merging properties, should we take only one or we can link with , special: Is this property "special" : need a special management see is_correct function in host and service """ self.default = default self.has_default = (default is not NONE_OBJECT) self.required = not self.has_default self.class_inherit = class_inherit or [] self.help = _help or '' self.unmanaged = unmanaged self.no_slots = no_slots self.fill_brok = fill_brok or [] self.brok_transformation = brok_transformation self.retention = retention self.retention_preparation = retention_preparation self.retention_restoration = retention_restoration self.to_send = to_send self.override = override self.managed = managed self.unused = False self.merging = merging self.split_on_comma = split_on_comma self.keep_empty = keep_empty self.special = special def __repr__(self): # pragma: no cover return '<Property %r, default: %r />' % (self.__class__, self.default) __str__ = __repr__ def pythonize(self, val): # pylint: disable=no-self-use """Generic pythonize method :param val: value to python :type val: :return: the value itself :rtype: """ return val
class Property(object): '''Base class for all properties. Same semantic for all subclasses (except UnusedProp): The property is required if, and only if, the default value is `None`. ''' def __init__(self, default=NONE_OBJECT, class_inherit=None, # pylint: disable=R0913 unmanaged=False, _help='', no_slots=False, fill_brok=None, brok_transformation=None, retention=False, retention_preparation=None, retention_restoration=None, to_send=False, override=False, managed=True, split_on_comma=True, keep_empty=False, merging='uniq', special=False): ''' `default`: the default value to be used if this property is not set. If default is None, this property is required. `class_inherit`: List of 2-tuples, (Service, 'blabla'): must set this property to the Service class with name blabla. if (Service, None): must set this property to the Service class with same name `unmanaged`: .... `help`: usage text `no_slots`: do not take this property for __slots__ `fill_brok`: if set, send to broker. There are two categories: FULL_STATUS for initial and update status, CHECK_RESULT for check results `retention`: if set, the property will be saved in the retention files `retention_preparation`: function name, if set, this function will be called with the property before saving the date to the retention `retention_restoration`: function name, if set, this function will be called with the restored retention data `split_on_comma`: indicates that list property value should not be split on comma delimiter (values may contain commas that we want to keep). Only for the initial call: brok_transformation: if set, will call the function with the value of the property when flattening data is necessary (like realm_name instead of the realm object). 
override: for scheduler, if the property must override the value of the configuration we send it managed: property that is managed in Nagios but not in Alignak merging: for merging properties, should we take only one or we can link with , special: Is this property "special" : need a special management see is_correct function in host and service ''' pass def __repr__(self): pass def pythonize(self, val): '''Generic pythonize method :param val: value to python :type val: :return: the value itself :rtype: ''' pass
4
3
31
5
10
16
1
1.69
1
0
0
10
3
20
3
3
104
21
32
30
23
54
27
25
23
1
1
0
3
3,983
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/resultmodulation.py
alignak.objects.resultmodulation.Resultmodulation
class Resultmodulation(Item): """Resultmodulation class is simply a modulation of a check result exit code during a modulation_period. """ my_type = 'resultmodulation' my_name_property = "%s_name" % my_type properties = Item.properties.copy() properties.update({ 'resultmodulation_name': StringProp(), 'exit_codes_match': IntListProp(default=[]), 'exit_code_modulation': IntegerProp(default=None), 'modulation_period': StringProp(default=None), }) special_properties = ('modulation_period',) def is_active(self, timperiods): """ Know if this result modulation is active now :return: True is we are in the period, otherwise False :rtype: bool """ now = int(time.time()) timperiod = timperiods[self.modulation_period] if not timperiod or timperiod.is_time_valid(now): return True return False def module_return(self, return_code, timeperiods): """Module the exit code if necessary :: * modulation_period is legit * exit_code_modulation * return_code in exit_codes_match :param return_code: actual code returned by the check :type return_code: int :return: return_code modulated if necessary (exit_code_modulation) :rtype: int """ # Only if in modulation_period of modulation_period == None if self.is_active(timeperiods): # Try to change the exit code only if a new one is defined if self.exit_code_modulation is not None: # First with the exit_code_match if return_code in self.exit_codes_match: return_code = self.exit_code_modulation return return_code
class Resultmodulation(Item): '''Resultmodulation class is simply a modulation of a check result exit code during a modulation_period. ''' def is_active(self, timperiods): ''' Know if this result modulation is active now :return: True is we are in the period, otherwise False :rtype: bool ''' pass def module_return(self, return_code, timeperiods): '''Module the exit code if necessary :: * modulation_period is legit * exit_code_modulation * return_code in exit_codes_match :param return_code: actual code returned by the check :type return_code: int :return: return_code modulated if necessary (exit_code_modulation) :rtype: int ''' pass
3
3
17
2
6
9
3
0.74
1
1
0
0
2
0
2
36
56
9
27
9
24
20
18
9
15
4
3
3
6
3,984
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/satellitelink.py
alignak.objects.satellitelink.LinkError
class LinkError(Exception): """Exception raised for errors with the satellite links. Attributes: msg -- explanation of the error """ def __init__(self, msg): super(LinkError, self).__init__(msg) logger.error(msg) self.msg = msg def __str__(self): # pragma: no cover """Exception to String""" return "Satellite link error: %s" % self.msg
class LinkError(Exception): '''Exception raised for errors with the satellite links. Attributes: msg -- explanation of the error ''' def __init__(self, msg): pass def __str__(self): '''Exception to String''' pass
3
2
4
0
3
1
1
0.86
1
1
0
0
2
1
2
12
15
3
7
4
4
6
7
4
4
1
3
0
2
3,985
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/satellitelink.py
alignak.objects.satellitelink.SatelliteLinks
class SatelliteLinks(Items): """Class to handle serveral SatelliteLink""" name_property = "name" inner_class = SatelliteLink def __repr__(self): # pragma: no cover return '<%r %d elements: %r/>' % \ (self.__class__.__name__, len(self), ', '.join([s.name for s in self])) __str__ = __repr__ def linkify(self, modules): """Link modules and Satellite links :param modules: Module object list :type modules: alignak.objects.module.Modules :return: None """ logger.debug("Linkify %s with %s", self, modules) self.linkify_s_by_module(modules) def linkify_s_by_module(self, modules): """ Link modules to items :param modules: Modules object (list of all the modules found in the configuration) :type modules: alignak.objects.module.Modules :return: None """ for i in self: links_list = strip_and_uniq(i.modules) new = [] for name in [e for e in links_list if e]: module = modules.find_by_name(name) if module is not None and module.uuid not in new: new.append(module) else: i.add_error("Error: the module %s is unknown for %s" % (name, i.get_name())) i.modules = new
class SatelliteLinks(Items): '''Class to handle serveral SatelliteLink''' def __repr__(self): pass def linkify(self, modules): '''Link modules and Satellite links :param modules: Module object list :type modules: alignak.objects.module.Modules :return: None ''' pass def linkify_s_by_module(self, modules): ''' Link modules to items :param modules: Modules object (list of all the modules found in the configuration) :type modules: alignak.objects.module.Modules :return: None ''' pass
4
3
11
1
6
4
2
0.62
1
0
0
6
3
0
3
48
41
8
21
12
17
13
19
12
15
4
2
3
6
3,986
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/schedulerlink.py
alignak.objects.schedulerlink.SchedulerLink
class SchedulerLink(SatelliteLink): """ Class to manage the scheduler information """ # Ok we lie a little here because we are a mere link in fact my_type = 'scheduler' my_name_property = "%s_name" % my_type properties = SatelliteLink.properties.copy() properties.update({ 'type': StringProp(default=u'scheduler', fill_brok=[FULL_STATUS], to_send=True), 'scheduler_name': StringProp(default='', fill_brok=[FULL_STATUS]), 'port': IntegerProp(default=7768, fill_brok=[FULL_STATUS], to_send=True), 'weight': IntegerProp(default=1, fill_brok=[FULL_STATUS]), 'skip_initial_broks': BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True), 'accept_passive_unknown_check_results': BoolProp(default=False, fill_brok=[FULL_STATUS], to_send=True), }) running_properties = SatelliteLink.running_properties.copy() running_properties.update({ # 'conf': # StringProp(default=None), # 'cfg': # DictProp(default={}), 'need_conf': StringProp(default=True), 'external_commands': StringProp(default=[]), }) def get_override_configuration(self): """ Some parameters can give as 'overridden parameters' like use_timezone so they will be mixed (in the scheduler) with the standard conf sent by the arbiter :return: dictionary of properties :rtype: dict """ res = {} properties = self.__class__.properties for prop, entry in list(properties.items()): if entry.override: res[prop] = getattr(self, prop) return res
class SchedulerLink(SatelliteLink): ''' Class to manage the scheduler information ''' def get_override_configuration(self): ''' Some parameters can give as 'overridden parameters' like use_timezone so they will be mixed (in the scheduler) with the standard conf sent by the arbiter :return: dictionary of properties :rtype: dict ''' pass
2
2
14
1
7
6
3
0.44
1
1
0
0
1
0
1
71
51
5
32
9
30
14
14
9
12
3
4
2
3
3,987
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/schedulingitem.py
alignak.objects.schedulingitem.SchedulingItem
class SchedulingItem(Item): # pylint: disable=too-many-instance-attributes """SchedulingItem class provide method for Scheduler to handle Service or Host objects """ # global counters used for [current|last]_[host|service]_[event|problem]_id current_event_id = 0 current_problem_id = 0 properties = Item.properties.copy() properties.update({ 'display_name': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'initial_state': CharProp(default='o', fill_brok=[FULL_STATUS]), 'max_check_attempts': IntegerProp(default=1, fill_brok=[FULL_STATUS]), 'check_interval': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT]), 'retry_interval': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT]), 'active_checks_enabled': BoolProp(default=True, fill_brok=[FULL_STATUS], retention=True), 'passive_checks_enabled': BoolProp(default=True, fill_brok=[FULL_STATUS], retention=True), 'check_period': StringProp(fill_brok=[FULL_STATUS], special=True), # Set a default freshness threshold not 0 if parameter is missing # and check_freshness is enabled 'check_freshness': BoolProp(default=False, fill_brok=[FULL_STATUS]), 'freshness_threshold': IntegerProp(default=0, fill_brok=[FULL_STATUS]), 'event_handler': StringProp(default='', fill_brok=[FULL_STATUS]), 'event_handler_enabled': BoolProp(default=False, fill_brok=[FULL_STATUS], retention=True), 'low_flap_threshold': IntegerProp(default=25, fill_brok=[FULL_STATUS]), 'high_flap_threshold': IntegerProp(default=50, fill_brok=[FULL_STATUS]), 'flap_detection_enabled': BoolProp(default=True, fill_brok=[FULL_STATUS], retention=True), 'process_perf_data': BoolProp(default=True, fill_brok=[FULL_STATUS], retention=True), 'retain_status_information': BoolProp(default=True, fill_brok=[FULL_STATUS]), 'retain_nonstatus_information': BoolProp(default=True, fill_brok=[FULL_STATUS]), 'contacts': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join', split_on_comma=True), 'contact_groups': ListProp(default=[], fill_brok=[FULL_STATUS], 
merging='join', split_on_comma=True), 'notification_interval': IntegerProp(default=60, fill_brok=[FULL_STATUS], special=True), 'first_notification_delay': IntegerProp(default=0, fill_brok=[FULL_STATUS]), 'notification_period': StringProp(fill_brok=[FULL_STATUS], special=True), 'notifications_enabled': BoolProp(default=True, fill_brok=[FULL_STATUS], retention=True), 'stalking_options': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join'), 'notes': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'notes_url': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'action_url': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'icon_image': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'icon_image_alt': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'icon_set': StringProp(default=u'', fill_brok=[FULL_STATUS]), # Alignak specific 'poller_tag': StringProp(default=u'None'), 'reactionner_tag': StringProp(default=u'None'), 'escalations': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join', split_on_comma=True), 'maintenance_period': StringProp(default=r'', fill_brok=[FULL_STATUS]), 'time_to_orphanage': IntegerProp(default=300, fill_brok=[FULL_STATUS]), 'labels': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join', split_on_comma=True), # BUSINESS CORRELATOR PART # Business rules output format template 'business_rule_output_template': StringProp(default=u'', fill_brok=[FULL_STATUS]), # Business rules notifications mode 'business_rule_smart_notifications': BoolProp(default=False, fill_brok=[FULL_STATUS]), # Treat downtimes as acknowledgements in smart notifications 'business_rule_downtime_as_ack': BoolProp(default=False, fill_brok=[FULL_STATUS]), # Enforces child nodes notification options 'business_rule_host_notification_options': ListProp(default=[], fill_brok=[FULL_STATUS], split_on_comma=True), 'business_rule_service_notification_options': ListProp(default=[], fill_brok=[FULL_STATUS], split_on_comma=True), # Business_Impact value 
'business_impact': IntegerProp(default=2, fill_brok=[FULL_STATUS]), # Trending 'trending_policies': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join'), # Our check ways. By defualt void, but will filled by an inner if need 'checkmodulations': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join'), 'macromodulations': ListProp(default=[], merging='join'), 'resultmodulations': ListProp(default=[], merging='join'), 'business_impact_modulations': ListProp(default=[], merging='join'), # Custom views 'custom_views': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join'), # Snapshot part 'snapshot_enabled': BoolProp(default=False), 'snapshot_command': StringProp(default=u''), 'snapshot_period': StringProp(default=u''), 'snapshot_interval': IntegerProp(default=5), }) running_properties = Item.running_properties.copy() running_properties.update({ 'modified_attributes': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'last_chk': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'next_chk': IntegerProp(default=0, fill_brok=[FULL_STATUS, 'next_schedule'], retention=True), 'in_checking': BoolProp(default=False, fill_brok=[FULL_STATUS, CHECK_RESULT, 'next_schedule']), 'in_maintenance': StringProp(default='', fill_brok=[FULL_STATUS], retention=True), 'latency': FloatProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'attempt': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'state_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'current_event_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_event_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_state': StringProp(default='PENDING', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_state_type': StringProp(default='HARD', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 
'last_state_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_state_change': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_hard_state_change': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_hard_state': StringProp(default='PENDING', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_hard_state_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'state_type': StringProp(default='HARD', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'state_type_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'duration_sec': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'output': StringProp(default='', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'long_output': StringProp(default='', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'is_flapping': BoolProp(default=False, fill_brok=[FULL_STATUS], retention=True), # dependencies for actions like notification or event handler, # so AFTER check return 'act_depend_of': ListProp(default=[]), # dependencies for checks raise, so BEFORE checks 'chk_depend_of': ListProp(default=[]), # elements that depend of me, so the reverse than just upper 'act_depend_of_me': ListProp(default=[]), # elements that depend of me 'chk_depend_of_me': ListProp(default=[]), 'last_state_update': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'checks_in_progress': ListProp(default=[]), 'notifications_in_progress': DictProp(default={}, retention=True, retention_preparation=dict_to_serialized_dict), 'comments': DictProp(default={}, fill_brok=[FULL_STATUS], retention=True, retention_preparation=dict_to_serialized_dict), 'flapping_changes': ListProp(default=[], fill_brok=[FULL_STATUS], retention=True), 'flapping_comment_id': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'percent_state_change': 
FloatProp(default=0.0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'problem_has_been_acknowledged': BoolProp(default=False, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'acknowledgement': StringProp(default=None, retention=True, retention_preparation=to_serialized, retention_restoration=from_serialized), 'acknowledgement_type': IntegerProp(default=1, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'has_been_checked': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'should_be_scheduled': IntegerProp(default=1, fill_brok=[FULL_STATUS], retention=True), 'last_problem_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'current_problem_id': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'execution_time': FloatProp(default=0.0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'u_time': FloatProp(default=0.0), 's_time': FloatProp(default=0.0), 'last_notification': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'current_notification_number': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'current_notification_id': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'check_flapping_recovery_notification': BoolProp(default=True, fill_brok=[FULL_STATUS], retention=True), 'scheduled_downtime_depth': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'pending_flex_downtime': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), 'timeout': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'start_time': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'end_time': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'early_timeout': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'return_code': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], 
retention=True), 'perf_data': StringProp(default='', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_perf_data': StringProp(default='', retention=True), 'customs': DictProp(default={}, fill_brok=[FULL_STATUS]), # Warning: for the notified_contacts sent to the retention, # we only save the names of the contacts, and we will relink # them on retention loading. 'notified_contacts': ListProp(default=[], retention=True), 'notified_contacts_ids': ListProp(default=[]), 'in_scheduled_downtime': BoolProp(default=False, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'in_scheduled_downtime_during_last_check': BoolProp(default=False, retention=True), 'actions': ListProp(default=[]), 'broks': ListProp(default=[]), # Problem/impact part 'is_problem': BoolProp(default=False, fill_brok=[FULL_STATUS]), 'is_impact': BoolProp(default=False, fill_brok=[FULL_STATUS]), # the save value of our business_impact for "problems" 'my_own_business_impact': IntegerProp(default=-1, fill_brok=[FULL_STATUS]), # list of problems that make us an impact 'source_problems': ListProp(default=[], fill_brok=[FULL_STATUS]), # list of the impact I'm the cause of 'impacts': ListProp(default=[], fill_brok=[FULL_STATUS]), # keep a trace of the old state before being an impact 'state_before_impact': StringProp(default='PENDING'), # keep a trace of the old state id before being an impact 'state_id_before_impact': IntegerProp(default=0), # if the state change, we know so we do not revert it 'state_changed_since_impact': BoolProp(default=False), # BUSINESS CORRELATOR PART # Say if we are business based rule or not 'got_business_rule': BoolProp(default=False, fill_brok=[FULL_STATUS]), # Previously processed business rule (with macro expanded) 'processed_business_rule': StringProp(default="", fill_brok=[FULL_STATUS]), # Our Dependency node for the business rule 'business_rule': StringProp(default=None), # Here it's the elements we are depending on # so our parents as network relation, or a host 
# we are depending in a hostdependency # or even if we are business based. 'parent_dependencies': ListProp(default=[], fill_brok=[FULL_STATUS]), # Here it's the guys that depend on us. So it's the total # opposite of the parent_dependencies 'child_dependencies': ListProp(default=[], fill_brok=[FULL_STATUS]), # Manage the unknown/unreachable during hard state 'in_hard_unknown_reach_phase': BoolProp(default=False, retention=True), 'was_in_hard_unknown_reach_phase': BoolProp(default=False, retention=True), # Set if the element just change its father/son topology 'topology_change': BoolProp(default=False, fill_brok=[FULL_STATUS]), # snapshots part 'last_snapshot': IntegerProp(default=0, fill_brok=[FULL_STATUS], retention=True), # Keep the string of the last command launched for this element 'last_check_command': StringProp(default=''), # Define if we are in the freshness expiration period 'freshness_expired': BoolProp(default=False, fill_brok=[FULL_STATUS], retention=True), # Store if the freshness log got raised 'freshness_log_raised': BoolProp(default=False, retention=True), }) macros = { # Business rules output formatting related macros 'STATUS': ('get_status', ['hosts', 'services']), 'SHORTSTATUS': ('get_short_status', ['hosts', 'services']), 'FULLNAME': 'get_full_name', } old_properties = { 'normal_check_interval': 'check_interval', 'retry_check_interval': 'retry_interval', 'criticity': 'business_impact', } special_properties = () def __init__(self, params, parsing=True): if not parsing: # When deserialized, those are dictionaries for prop in ['check_command', 'event_handler', 'snapshot_command', 'business_rule', 'acknowledgement']: if prop not in params or params[prop] is None: continue setattr(self, prop, unserialize(params[prop])) del params[prop] # else: # self.processed_business_rule = None # self.business_rule = None super(SchedulingItem, self).__init__(params, parsing=parsing) # Default values for unset parameters will be filled in Host or Service class! 
    @property
    def monitored(self):
        """Simple property renaming for better API;)

        True if either active or passive checks are enabled for this item."""
        return self.active_checks_enabled or self.passive_checks_enabled

    @property
    def last_check(self):
        """Simple property renaming for better API;)"""
        return self.last_chk

    @property
    def next_check(self):
        """Simple property renaming for better API;)"""
        return self.next_chk

    @property
    def acknowledged(self):
        """Simple property renaming for better API;)"""
        return self.problem_has_been_acknowledged

    @property
    def downtimed(self):
        """Simple property renaming for better API;)"""
        return self.in_scheduled_downtime

    # fixme: Item base class method seems to be enough!
    # def serialize(self, no_json=True, printing=False):
    #     if self.my_type == 'service':
    #         print("*** Serializing service (SI): %s" % self)
    #
    #     res = super(SchedulingItem, self).serialize(no_json=no_json, printing=printing)
    #
    #     if self.my_type == 'service':
    #         print("*** Serializing service (SI): %s" % res)
    #
    #     for prop in ['check_command', 'event_handler', 'snapshot_command',
    #                  'business_rule', 'acknowledgement']:
    #         res[prop] = serialize(getattr(self, prop, None),
    #                               no_json=no_json, printing=printing)
    #
    #     return res

    def get_check_command(self):
        """Wrapper to get the name of the check_command attribute

        :return: check_command name (empty string if no check command is defined)
        :rtype: str
        """
        if not getattr(self, 'check_command', None):
            return ''
        return self.check_command.get_name()

    def change_check_command(self, command, commands):
        """ Change the check command

        :param command: the new command line
        :type command: str
        :param commands: the available command items
        :type commands: alignak.objects.command.Commands
        :return: None
        """
        data = {"commands": commands, "command_line": command, "poller_tag": self.poller_tag}
        setattr(self, 'check_command', CommandCall(data, parsing=True))

    def change_event_handler(self, command, commands):
        """ Change the event handler command

        :param command: the new command line
        :type command: str
        :param commands: the available command items
        :type commands: alignak.objects.command.Commands
        :return: None
        """
        data = {"commands": commands, "command_line": command, "poller_tag": self.poller_tag}
        setattr(self, 'event_handler', CommandCall(data, parsing=True))

    def change_snapshot_command(self, command, commands):
        """ Change the snapshot command

        :param command: the new command line
        :type command: str
        :param commands: the available command items
        :type commands: alignak.objects.command.Commands
        :return: None
        """
        data = {"commands": commands, "command_line": command, "poller_tag": self.poller_tag}
        setattr(self, 'snapshot_command', CommandCall(data, parsing=True))

    def add_flapping_change(self, sample):
        """Add a flapping sample and keep cls.flap_history samples

        :param sample: Sample to add (True when the state changed since last check)
        :type sample: bool
        :return: None
        """
        cls = self.__class__

        # If this element is not in flapping check, or
        # the flapping is globally disable, bailout
        if not self.flap_detection_enabled or not cls.enable_flap_detection:
            return

        self.flapping_changes.append(sample)

        # Keep just 20 changes (global flap_history value)
        flap_history = cls.flap_history

        if len(self.flapping_changes) > flap_history:
            self.flapping_changes.pop(0)

    def update_flapping(self, notif_period, hosts, services):
        """Compute the sample list (self.flapping_changes) and determine
        whether the host/service is flapping or not

        Recent state changes weigh more than old ones (weights ramp from
        0.8 to 1.2 across the history window, Nagios-style).

        :param notif_period: notification period object for this host/service
        :type notif_period: alignak.object.timeperiod.Timeperiod
        :param hosts: Hosts objects, used to create notification if necessary
        :type hosts: alignak.objects.host.Hosts
        :param services: Services objects, used to create notification if necessary
        :type services: alignak.objects.service.Services
        :return: None
        :rtype: Nonetype
        """
        flap_history = self.__class__.flap_history
        # We compute the flapping change in %
        res = 0.0
        i = 0
        for has_changed in self.flapping_changes:
            i += 1
            if has_changed:
                res += i * (1.2 - 0.8) / flap_history + 0.8
        res = res / flap_history
        res *= 100

        # We can update our value
        self.percent_state_change = res

        # Look if we are full in our states, because if not
        # the value is not accurate
        is_full = len(self.flapping_changes) >= flap_history

        # Now we get the low_flap_threshold and high_flap_threshold values
        # They can be from self, or class
        (low_flap_threshold, high_flap_threshold) = (self.low_flap_threshold,
                                                     self.high_flap_threshold)
        # TODO: no more useful because a default value is defined, but is it really correct?
        if low_flap_threshold == -1:  # pragma: no cover, never used
            cls = self.__class__
            low_flap_threshold = cls.global_low_flap_threshold
        if high_flap_threshold == -1:  # pragma: no cover, never used
            cls = self.__class__
            high_flap_threshold = cls.global_high_flap_threshold

        # Now we check is flapping change, but only if we got enough
        # states to look at the value accuracy
        if self.is_flapping and res < low_flap_threshold and is_full:
            self.is_flapping = False
            # We also raise a log entry
            self.raise_flapping_stop_log_entry(res, low_flap_threshold)
            # and a notification
            self.remove_in_progress_notifications(master=True)
            self.create_notifications('FLAPPINGSTOP', notif_period, hosts, services)
            # And update our status for modules
            self.broks.append(self.get_update_status_brok())

        if not self.is_flapping and res >= high_flap_threshold and is_full:
            self.is_flapping = True
            # We also raise a log entry
            self.raise_flapping_start_log_entry(res, high_flap_threshold)
            # and a notification
            self.remove_in_progress_notifications(master=True)
            self.create_notifications('FLAPPINGSTART', notif_period, hosts, services)
            # And update our status for modules
            self.broks.append(self.get_update_status_brok())

    def add_attempt(self):
        """Add an attempt when a object is a non-ok state
        The attempt counter is capped at max_check_attempts.

        :return: None
        """
        self.attempt += 1
        self.attempt = min(self.attempt, self.max_check_attempts)

    def is_max_attempts(self):
        """Check if max check attempt is reached

        :return: True if self.attempt >= self.max_check_attempts, otherwise False
        :rtype: bool
        """
        return self.attempt >= self.max_check_attempts

    def do_check_freshness(self, hosts, services, timeperiods,
                           macromodulations, checkmodulations, checks, when):
        # pylint: disable=too-many-nested-blocks, too-many-branches
        """Check freshness and schedule a check now if necessary.

        This function is called by the scheduler if Alignak is configured to check
        the freshness.

        It is called for hosts that have the freshness check enabled if they
        are only passively checked.

        It is called for services that have the freshness check enabled if they
        are only passively checked and if their depending host is not in a
        freshness expired state (freshness_expiry = True).

        A log is raised when the freshess expiry is detected and the item is
        set as freshness_expiry.

        :param hosts: hosts objects, used to launch checks
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used launch checks
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used to get check_period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change
                                 check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :param when: timestamp used as "now" for the freshness computation
        :type when: int
        :return: A check or None
        :rtype: None | object
        """
        now = when

        # Before, check if class (host or service) have check_freshness OK
        # Then check if item want freshness, then check freshness
        cls = self.__class__
        if not self.in_checking and self.freshness_threshold and not self.freshness_expired:
            # logger.debug("Checking freshness for %s, last state update: %s, now: %s.",
            #              self.get_full_name(), self.last_state_update, now)
            if os.getenv('ALIGNAK_LOG_CHECKS', None):
                logger.info("--ALC-- -> checking freshness for: %s", self.get_full_name())

            # If we never checked this item, we begin the freshness period
            if not self.last_state_update:
                self.last_state_update = int(now)
            if self.last_state_update < now - \
                    (self.freshness_threshold + cls.additional_freshness_latency):
                timeperiod = timeperiods[self.check_period]
                if timeperiod is None or timeperiod.is_time_valid(now):
                    # Create a new check for the scheduler
                    chk = self.launch_check(now, hosts, services, timeperiods,
                                            macromodulations, checkmodulations, checks)
                    if not chk:
                        logger.warning("No raised freshness check for: %s", self)
                        return None
                    chk.freshness_expiry_check = True
                    chk.check_time = time.time()
                    chk.output = "Freshness period expired: %s" % (
                        datetime.utcfromtimestamp(int(chk.check_time)).strftime(
                            "%Y-%m-%d %H:%M:%S %Z"))
                    if self.my_type == 'host':
                        # Map the configured freshness_state letter to a host
                        # check exit status (o=UP, d=DOWN, u/x=UNREACHABLE)
                        if self.freshness_state == 'o':
                            chk.exit_status = 0
                        elif self.freshness_state == 'd':
                            chk.exit_status = 2
                        elif self.freshness_state in ['u', 'x']:
                            chk.exit_status = 4
                        else:
                            chk.exit_status = 3
                    else:
                        # Map the configured freshness_state letter to a service
                        # check exit status (o=OK, w=WARNING, c=CRITICAL, ...)
                        if self.freshness_state == 'o':
                            chk.exit_status = 0
                        elif self.freshness_state == 'w':
                            chk.exit_status = 1
                        elif self.freshness_state == 'c':
                            chk.exit_status = 2
                        elif self.freshness_state == 'u':
                            chk.exit_status = 3
                        elif self.freshness_state == 'x':
                            chk.exit_status = 4
                        else:
                            chk.exit_status = 3

                    return chk
                else:
                    logger.debug("Ignored freshness check for %s, because "
                                 "we are not in the check period.", self.get_full_name())

        return None

    def set_myself_as_problem(self, hosts, services, timeperiods, bi_modulations):
        # pylint: disable=too-many-locals
        """ Raise all impact from my error. I'm setting myself
        as a problem, and I register myself as this in all
        hosts/services that depend_on_me.
So they are now my impacts :param hosts: hosts objects, used to get impacts :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get impacts :type services: alignak.objects.service.Services :param timeperiods: Timeperiods objects, used to get act_depend_of_me timeperiod :type timeperiods: alignak.objects.timeperiod.Timeperiods :param bi_modulations: business impact modulations objects :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations :return: None """ now = time.time() self.is_problem = True # we should warn potentials impact of our problem # and they should be cool to register them so I've got # my impacts list impacts = list(self.impacts) for (impact_id, status, timeperiod_id, _) in self.act_depend_of_me: # Check if the status is ok for impact if impact_id in hosts: impact = hosts[impact_id] elif impact_id in services: impact = services[impact_id] else: logger.warning("Problem with my impacts: %s", self) timeperiod = timeperiods[timeperiod_id] for stat in status: if self.is_state(stat): # now check if we should bailout because of a # not good timeperiod for dep if timeperiod is None or timeperiod.is_time_valid(now): new_impacts = impact.register_a_problem(self, hosts, services, timeperiods, bi_modulations) impacts.extend(new_impacts) # Only update impacts and create new brok if impacts changed. s_impacts = set(impacts) if s_impacts == set(self.impacts): return self.impacts = list(s_impacts) # We can update our business_impact value now self.update_business_impact_value(hosts, services, timeperiods, bi_modulations) # And we register a new broks for update status self.broks.append(self.get_update_status_brok()) def update_business_impact_value(self, hosts, services, timeperiods, bi_modulations): """We update our 'business_impact' value with the max of the impacts business_impact if we got impacts. 
And save our 'configuration' business_impact if we do not have do it before If we do not have impacts, we revert our value :param hosts: hosts objects, used to get impacts :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get impacts :type services: alignak.objects.service.Services :param timeperiods: Timeperiods objects, used to get modulation_period :type timeperiods: alignak.objects.timeperiod.Timeperiods :param bi_modulations: business impact modulations objects :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations :return: None """ # First save our business_impact if not already do if self.my_own_business_impact == -1: self.my_own_business_impact = self.business_impact # We look at our crit modulations. If one apply, we take apply it # and it's done in_modulation = False for bi_modulation_id in self.business_impact_modulations: bi_modulation = bi_modulations[bi_modulation_id] now = time.time() period = timeperiods[bi_modulation.modulation_period] if period is None or period.is_time_valid(now): self.business_impact = bi_modulation.business_impact in_modulation = True # We apply the first available, that's all break # If we truly have impacts, we get the max business_impact # if it's huge than ourselves if self.impacts: bp_impacts = [hosts[elem].business_impact for elem in self.impacts if elem in hosts] bp_impacts.extend([services[elem].business_impact for elem in self.impacts if elem in services]) self.business_impact = max(self.business_impact, max(bp_impacts)) return # If we are not a problem, we setup our own_crit if we are not in a # modulation period if self.my_own_business_impact != -1 and not in_modulation: self.business_impact = self.my_own_business_impact def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations): """Remove this objects as an impact for other schedulingitem. 
:param hosts: hosts objects, used to get impacts :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get impacts :type services: alignak.objects.service.Services :param timeperiods: Timeperiods objects, used for update_business_impact_value :type timeperiods: alignak.objects.timeperiod.Timeperiods :param bi_modulations: business impact modulation are used when setting myself as problem :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations :return: None """ was_pb = self.is_problem if self.is_problem: self.is_problem = False # we warn impacts that we are no more a problem for impact_id in self.impacts: if impact_id in hosts: impact = hosts[impact_id] else: impact = services[impact_id] impact.unregister_a_problem(self) # we can just drop our impacts list self.impacts = [] # We update our business_impact value, it's not a huge thing :) self.update_business_impact_value(hosts, services, timeperiods, bi_modulations) # If we were a problem, we say to everyone # our new status, with good business_impact value if was_pb: # And we register a new broks for update status self.broks.append(self.get_update_status_brok()) def register_a_problem(self, prob, hosts, services, timeperiods, bi_modulations): # pylint: disable=too-many-locals """Call recursively by potentials impacts so they update their source_problems list. 
But do not go below if the problem is not a real one for me like If I've got multiple parents for examples :param prob: problem to register :type prob: alignak.objects.schedulingitem.SchedulingItem :param hosts: hosts objects, used to get object in act_depend_of_me :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get object in act_depend_of_me :type services: alignak.objects.service.Services :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check) :type timeperiods: alignak.objects.timeperiod.Timeperiods :param bi_modulations: business impact modulation are used when setting myself as problem :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations :return: list of host/service that are impacts :rtype: list[alignak.objects.schedulingitem.SchedulingItem] """ # Maybe we already have this problem? If so, bailout too if prob.uuid in self.source_problems: return [] now = time.time() was_an_impact = self.is_impact # Our father already look if he impacts us. So if we are here, # it's that we really are impacted self.is_impact = True impacts = [] # Ok, if we are impacted, we can add it in our # problem list # TODO: remove this unused check if self.is_impact: logger.debug("I am impacted: %s", self) # Maybe I was a problem myself, now I can say: not my fault! 
            if self.is_problem:
                self.no_more_a_problem(hosts, services, timeperiods, bi_modulations)

            # Ok, we are now impacted, we should take the good state
            # but only when we just go to the impacted state
            if not was_an_impact:
                self.set_impact_state()

            # Ok now we can be a simple impact
            impacts.append(self.uuid)
            if prob.uuid not in self.source_problems:
                self.source_problems.append(prob.uuid)
            # we should send this problem to all potential impacted that
            # depend on us
            for (impacted_item_id, status, timeperiod_id, _) in self.act_depend_of_me:
                # Check if the status is ok for impact
                if impacted_item_id in hosts:
                    impact = hosts[impacted_item_id]
                else:
                    impact = services[impacted_item_id]
                timeperiod = timeperiods[timeperiod_id]
                for stat in status:
                    if self.is_state(stat):
                        # now check if we should bailout because of a
                        # not good timeperiod for dep
                        if timeperiod is None or timeperiod.is_time_valid(now):
                            new_impacts = impact.register_a_problem(prob, hosts, services,
                                                                    timeperiods, bi_modulations)
                            impacts.extend(new_impacts)

            # And we register a new broks for update status
            self.broks.append(self.get_update_status_brok())

        # now we return all impacts (can be void of course)
        return impacts

    def unregister_a_problem(self, prob):
        """Remove the problem from our problems list
        and check if we are still 'impacted'

        :param prob: problem to remove
        :type prob: alignak.objects.schedulingitem.SchedulingItem
        :return: None
        """
        self.source_problems.remove(prob.uuid)

        # For know if we are still an impact, maybe our dependencies
        # are not aware of the remove of the impact state because it's not ordered
        # so we can just look at if we still have some problem in our list
        if not self.source_problems:
            self.is_impact = False
            # No more an impact, we can unset the impact state
            self.unset_impact_state()

        # And we register a new broks for update status
        self.broks.append(self.get_update_status_brok())

    def is_enable_action_dependent(self, hosts, services):
        """
        Check if dependencies states match dependencies statuses
        This basically means that a dependency is in a bad state and
        it can explain this object state.

        :param hosts: hosts objects, used to get object in act_depend_of
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get object in act_depend_of
        :type services: alignak.objects.service.Services
        :return: True if all dependencies matches the status, false otherwise
        :rtype: bool
        """
        # Use to know if notification is raise or not
        enable_action = False
        for (dep_id, status, _, _) in self.act_depend_of:
            # 'n' in the status list means "never block" for this dependency
            if 'n' in status:
                enable_action = True
            else:
                if dep_id in hosts:
                    dep = hosts[dep_id]
                else:
                    dep = services[dep_id]
                p_is_down = False
                dep_match = [dep.is_state(stat) for stat in status]
                # check if the parent match a case, so he is down
                if True in dep_match:
                    p_is_down = True
                if not p_is_down:
                    enable_action = True
        return enable_action

    def check_and_set_unreachability(self, hosts, services):
        """
        Check if all dependencies are down, if yes set this object
        as unreachable.

        todo: this function do not care about execution_failure_criteria!

        :param hosts: hosts objects, used to get object in act_depend_of
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get object in act_depend_of
        :type services: alignak.objects.service.Services
        :return: None
        """
        parent_is_down = []
        for (dep_id, _, _, _) in self.act_depend_of:
            if dep_id in hosts:
                dep = hosts[dep_id]
            else:
                dep = services[dep_id]
            if dep.state in ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN',
                             'x', 'UNREACHABLE']:
                parent_is_down.append(True)
            else:
                parent_is_down.append(False)
        # At least one parent is alive: we stay reachable
        if False in parent_is_down:
            return
        # all parents down
        self.set_unreachable()

    def do_i_raise_dependency(self, status, inherit_parents, hosts, services, timeperiods):
        # pylint: disable=too-many-locals
        """Check if this object or one of its dependency state (chk dependencies) match the status

        :param status: state list where dependency matters (notification failure criteria)
        :type status: list
        :param inherit_parents: recurse over parents
        :type inherit_parents: bool
        :param hosts: hosts objects, used to raise dependency check
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to raise dependency check
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: True if one state matched the status list, otherwise False
        :rtype: bool
        """
        # Do I raise dep?
        for stat in status:
            if self.is_state(stat):
                return True

        # If we do not inherit parent, we have no reason to be blocking
        if not inherit_parents:
            return False

        # Ok, I do not raise dep, but my dep maybe raise me
        now = time.time()
        for (dep_id, dep_status, _, timeperiod_id, inh_parent) in self.chk_depend_of:
            if dep_id in hosts:
                dep = hosts[dep_id]
            else:
                dep = services[dep_id]
            timeperiod = timeperiods[timeperiod_id]
            if dep.do_i_raise_dependency(dep_status, inh_parent, hosts, services, timeperiods):
                if timeperiod is None or timeperiod.is_time_valid(now):
                    return True

        # No, I really do not raise...
        return False

    def is_no_check_dependent(self, hosts, services, timeperiods):
        """Check if there is some host/service that this object depend on
        has a state in the status list .

        :param hosts: hosts objects, used to raise dependency check
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to raise dependency check
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: True if this object has a check dependency, otherwise False
        :rtype: bool
        """
        now = time.time()
        for (dep_id, status, _, timeperiod_id, inh_parent) in self.chk_depend_of:
            timeperiod = timeperiods[timeperiod_id]
            if timeperiod is None or timeperiod.is_time_valid(now):
                if dep_id in hosts:
                    dep = hosts[dep_id]
                else:
                    dep = services[dep_id]
                if dep.do_i_raise_dependency(status, inh_parent, hosts, services, timeperiods):
                    return True
        return False

    def raise_dependencies_check(self, ref_check, hosts, services, timeperiods,
                                 macromodulations, checkmodulations, checks):
        # pylint: disable=too-many-locals, too-many-nested-blocks
        """Get checks that we depend on if EVERY following conditions is met::

        * timeperiod is valid
        * dep.last_state_update < now - cls.cached_check_horizon (check of dependency is "old")

        :param ref_check: Check we want to get dependency from
        :type ref_check: alignak.check.Check
        :param hosts: hosts objects, used for almost every operation
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used for almost every operation
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change
                                 check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :return: check created and check in_checking
        :rtype: dict
        """
        now = time.time()
        cls = self.__class__
        new_checks = []
        checking_checks = []
        for (dep_id, _, timeperiod_id, _) in self.act_depend_of:
            if dep_id in hosts:
                dep_item = hosts[dep_id]
            else:
                dep_item = services[dep_id]
            timeperiod = timeperiods[timeperiod_id]

            # If the dep_item timeperiod is not valid, do not raise the dep,
            # None=everytime
            if timeperiod is None or timeperiod.is_time_valid(now):
                # if the update is 'fresh', do not raise dep,
                # cached_check_horizon = cached_service_check_horizon for service
                if dep_item.last_state_update < now - cls.cached_check_horizon:
                    # Do not launch the check if it depends on a passive check of if a check
                    # is yet planned
                    if dep_item.active_checks_enabled:
                        if not dep_item.in_checking:
                            newchk = dep_item.launch_check(now, hosts, services, timeperiods,
                                                           macromodulations, checkmodulations,
                                                           checks, ref_check, dependent=True)
                            if newchk is not None:
                                new_checks.append(newchk)
                        else:
                            # A check is already planned: register on it instead
                            if dep_item.checks_in_progress:
                                check_uuid = dep_item.checks_in_progress[0]
                                checks[check_uuid].depend_on_me.append(ref_check)
                                checking_checks.append(check_uuid)

        return {'new': new_checks, 'checking':
                checking_checks}

    def schedule(self, hosts, services, timeperiods, macromodulations, checkmodulations,
                 checks, force=False, force_time=None):
        # pylint: disable=too-many-branches, too-many-arguments, too-many-locals
        """Main scheduling function
        If a check is in progress, or active check are disabled, do not schedule a check.
        The check interval change with HARD state::

        * SOFT: retry_interval
        * HARD: check_interval

        The first scheduling is evenly distributed, so all checks
        are not launched at the same time.

        :param hosts: hosts objects, used for almost every operation
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used for almost every operation
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change
                                 check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :param force: tell if we forced this object to schedule a check
        :type force: bool
        :param force_time: time we would like the check to be scheduled
        :type force_time: None | int
        :return: None
        """
        # next_chk is already set, do not change
        # unless we force the check or the time
        if self.in_checking and not (force or force_time):
            return None

        cls = self.__class__
        # if no active check and no force, no check
        if (not self.active_checks_enabled or not cls.execute_checks) and not force:
            logger.debug("No check for %s", self.get_full_name())
            return None

        now = time.time()
        current_next_check = self.next_chk

        # If check_interval is 0, we should not add a check for a service
        # but suppose a 5 min check interval for an host
        if self.check_interval == 0 and not force:
            if cls.my_type == 'service':
                return None
            self.check_interval = 300 / cls.interval_length

        # Interval change is in a HARD state or not
        # If the retry is 0, take the normal value
        if self.state_type == 'HARD' or self.retry_interval == 0:
            interval = self.check_interval * cls.interval_length
        else:
            interval = self.retry_interval * cls.interval_length

        # Determine when a new check (randomize and distribute next check time)
        # or recurring check should happen.
        if self.next_chk == 0:
            # At the start, we cannot have an interval more than cls.max_check_spread
            # Global service_max_check_spread or host_max_check_spread in configuration
            # is set as max_check_spread in the objects.
            interval = min(interval, cls.max_check_spread * cls.interval_length)
            time_add = interval * random.uniform(0.0, 1.0)
        else:
            time_add = interval

        # Do the actual Scheduling now
        # If not force_time, try to schedule
        if force_time is None:
            check_period = None
            if getattr(self, 'check_period', None) is not None:
                check_period = timeperiods[self.check_period]

            # Do not calculate next_chk based on current time, but
            # based on the last check execution time.
            # Important for consistency of data for trending.
            if self.next_chk == 0 or self.next_chk is None:
                self.next_chk = now

            # If the next_chk is already in the future, do not touch it.
            # But if == 0, means was 0 in fact, schedule it too
            if self.next_chk <= now:
                # maybe we do not have a check_period, if so, take always good (24x7)
                if check_period:
                    self.next_chk = check_period.get_next_valid_time_from_t(
                        self.next_chk + time_add)
                else:
                    self.next_chk = int(self.next_chk + time_add)

                # Maybe we load next_chk from retention and the
                # value of the next_chk is still in the past even
                # after adding an interval
                if self.next_chk < now:
                    interval = min(interval, cls.max_check_spread * cls.interval_length)
                    time_add = interval * random.uniform(0.0, 1.0)

                    # if we got a check period, use it, if now, use now
                    if check_period:
                        self.next_chk = check_period.get_next_valid_time_from_t(now + time_add)
                    else:
                        self.next_chk = int(now + time_add)
            # else: keep the self.next_chk value in the future
        else:
            self.next_chk = int(force_time)

        # If next time is None, do not go
        if self.next_chk is None:
            # Nagios do not raise it, I'm wondering if we should
            return None

        logger.debug("-> schedule: %s / %s (interval: %d, added: %d)",
                     self.get_full_name(),
                     datetime.utcfromtimestamp(self.next_chk).strftime('%Y-%m-%d %H:%M:%S'),
                     interval, time_add)
        if current_next_check != self.next_chk and os.getenv('ALIGNAK_LOG_CHECKS', None):
            logger.info("--ALC-- -> scheduled the next check for %s "
                        "at %s (interval: %d, added: %d)",
                        self.get_full_name(),
                        datetime.utcfromtimestamp(self.next_chk).strftime('%Y-%m-%d %H:%M:%S'),
                        interval, time_add)

        # Get the command to launch, and put it in queue
        return self.launch_check(self.next_chk, hosts, services, timeperiods, macromodulations,
                                 checkmodulations, checks, force=force)

    def compensate_system_time_change(self, difference):  # pragma: no cover,
        # not with unit tests
        """If a system time change occurs we have to update
        properties time related to reflect change

        :param difference: difference between new time and old time (may be negative)
        :type difference: int
        :return: None
        """
        # We only need to change some value
        for prop in ('last_notification', 'last_state_change',
                     'last_hard_state_change'):
            val = getattr(self, prop)  # current value
            # Do not go below 1970 :)
            val = max(0, val + difference)  # diff may be negative
            setattr(self, prop, val)

    def disable_active_checks(self, checks):
        """Disable active checks for this host/service
        Update check in progress with current object information

        :param checks: Checks object, to change all checks in progress
        :type checks: alignak.objects.check.Checks
        :return: None
        """
        self.active_checks_enabled = False
        for chk_id in self.checks_in_progress:
            chk = checks[chk_id]
            chk.status = ACT_STATUS_WAIT_CONSUME
            chk.exit_status = self.state_id
            chk.output = self.output
            chk.check_time = time.time()
            chk.execution_time = 0
            chk.perf_data = self.perf_data

    def remove_in_progress_check(self, check):
        """Remove check from check in progress

        :param check: Check to remove
        :type check: alignak.objects.check.Check
        :return: None
        """
        # The check is consumed, update the in_checking properties
        if check in self.checks_in_progress:
            self.checks_in_progress.remove(check)
        self.update_in_checking()

    def update_in_checking(self):
        """Update in_checking attribute.
        Object is in checking if we have checks in check_in_progress list

        :return: None
        """
        self.in_checking = (len(self.checks_in_progress) != 0)

    def remove_in_progress_notification(self, notification):
        """
        Remove a notification and mark them as zombie

        :param notification: the notification to remove
        :type notification: alignak.notification.Notification
        :return: None
        """
        if notification.uuid in self.notifications_in_progress:
            notification.status = ACT_STATUS_ZOMBIE
            del self.notifications_in_progress[notification.uuid]

    def remove_in_progress_notifications(self, master=True):
        """Remove all notifications from notifications_in_progress

        Preserves some specific notifications (downtime, ...)
        :param master: remove master notifications only if True (default value)
        :type master: bool
        :return: None
        """
        for notification in list(self.notifications_in_progress.values()):
            # Master notifications have no contact; when master is True we only
            # remove those and keep the per-contact child notifications.
            if master and notification.contact:
                continue

            # Do not remove some specific notifications
            if notification.type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED',
                                     u'CUSTOM', u'ACKNOWLEDGEMENT']:
                continue

            self.remove_in_progress_notification(notification)

    def get_event_handlers(self, hosts, macromodulations, timeperiods, ext_cmd=False):
        """Raise event handlers if NONE of the following conditions is met::

        * externalcmd is False and event_handlers are disabled (globally or locally)
        * externalcmd is False and object is in scheduled dowtime and no event handlers
          in downtime
        * self.event_handler and cls.global_event_handler are None

        :param hosts: hosts objects, used to get data for macros
        :type hosts: alignak.objects.host.Hosts
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used for macros evaluation
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param ext_cmd: tells if this function was called when handling an external_command.
        :type ext_cmd: bool
        :return: None
        """
        cls = self.__class__

        # The external command always pass
        # if not, only if we enable them (auto launch)
        if not ext_cmd and (not self.event_handler_enabled or not cls.enable_event_handlers):
            logger.debug("Event handler is disabled for %s", self.get_full_name())
            return

        # If we do not force and we are in downtime, bailout
        # if the no_event_handlers_during_downtimes is set in the configuration
        if not ext_cmd and self.in_scheduled_downtime and cls.no_event_handlers_during_downtimes:
            logger.debug("Event handler will not be launched. "
                         "The item %s is in a scheduled downtime", self.get_full_name())
            return

        # Item-specific event handler takes precedence over the global one
        if self.event_handler:
            event_handler = self.event_handler
        elif cls.global_event_handler:
            event_handler = cls.global_event_handler
        else:
            return

        mr = MacroResolver()
        data = self.get_data_for_event_handler(hosts)
        cmd = mr.resolve_command(event_handler, data, macromodulations, timeperiods)

        event_h = EventHandler({
            'command': cmd,
            'timeout': cls.event_handler_timeout,
            'ref': self.uuid,
            'reactionner_tag': event_handler.reactionner_tag
        })
        self.raise_event_handler_log_entry(event_handler)

        # ok we can put it in our temp action queue
        self.actions.append(event_h)

    def get_snapshot(self, hosts, macromodulations, timeperiods):  # pragma: no cover, not yet!
        """
        Raise snapshot event handlers if NONE of the following conditions is met::

        * snapshot_command is None
        * snapshot_enabled is disabled
        * snapshot_criteria does not matches current state
        * last_snapshot > now - snapshot_interval * interval_length (previous snapshot too early)
        * snapshot_period is not valid

        :param hosts: hosts objects, used to get data for macros
        :type hosts: alignak.objects.host.Hosts
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used for snapshot period and macros evaluation
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        """
        # We should have a snapshot_command, to be enabled and of course
        # in the good time and state :D
        if self.snapshot_command is None:
            return

        if not self.snapshot_enabled:
            return

        # look at if one state is matching the criteria
        boolmap = [self.is_state(s) for s in self.snapshot_criteria]
        if True not in boolmap:
            return

        # Time based checks now, we should be in the period and not too far
        # from the last_snapshot
        now = int(time.time())
        cls = self.__class__
        if self.last_snapshot > now - self.snapshot_interval * cls.interval_length:  # too close
            return

        # no period means 24x7 :)
        timeperiod = timeperiods[self.snapshot_period]
        if timeperiod is not None and not timeperiod.is_time_valid(now):
            return

        cls = self.__class__
        mr = MacroResolver()
        data = self.get_data_for_event_handler(hosts)
        cmd = mr.resolve_command(self.snapshot_command, data, macromodulations, timeperiods)
        reac_tag = self.snapshot_command.reactionner_tag
        event_h = EventHandler({
            'command': cmd,
            'timeout': cls.event_handler_timeout,
            'ref': self.uuid,
            'reactionner_tag': reac_tag,
            'is_snapshot': True
        })
        self.raise_snapshot_log_entry(self.snapshot_command)

        # we save the time we launch the snap
        self.last_snapshot = now

        # ok we can put it in our temp action queue
        self.actions.append(event_h)

    def check_for_flexible_downtime(self, timeperiods, hosts, services):
        """Enter in a downtime if necessary and raise start notification
        When a non Ok state occurs we try to raise a flexible downtime.

        :param timeperiods: Timeperiods objects, used for downtime period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param hosts: hosts objects, used to enter downtime
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to enter downtime
        :type services: alignak.objects.service.Services
        :return: None
        """
        status_updated = False
        for downtime_id in self.downtimes:
            downtime = self.downtimes[downtime_id]
            # Activate flexible downtimes (do not activate triggered downtimes)
            # Note: only activate if we are between downtime start and end time!
            if downtime.fixed or downtime.is_in_effect:
                continue
            # trigger_id '' or '0' means this is not a triggered downtime
            if downtime.start_time <= self.last_chk and downtime.end_time >= self.last_chk \
                    and self.state_id != 0 and downtime.trigger_id in ['', '0']:
                # returns downtimestart notifications
                self.broks.extend(downtime.enter(timeperiods, hosts, services))
                status_updated = True
        if status_updated is True:
            self.broks.append(self.get_update_status_brok())

    def update_hard_unknown_phase_state(self):
        """Update in_hard_unknown_reach_phase attribute and
        was_in_hard_unknown_reach_phase
        UNKNOWN during a HARD state are not so important, and they should
        not raise notif about it

        :return: None
        """
        self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase

        # We do not care about SOFT state at all
        # and we are sure we are no more in such a phase
        if self.state_type != 'HARD' or self.last_state_type != 'HARD':
            self.in_hard_unknown_reach_phase = False

        # So if we are not in already in such a phase, we check for
        # a start or not. So here we are sure to be in a HARD/HARD following
        # state
        if not self.in_hard_unknown_reach_phase:
            if self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN' \
                    or self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE':
                self.in_hard_unknown_reach_phase = True
                # We also backup with which state we was before enter this phase
                self.state_before_hard_unknown_reach_phase = self.last_state
                return
        else:
            # if we were already in such a phase, look for its end
            if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE':
                self.in_hard_unknown_reach_phase = False

        # If we just exit the phase, look if we exit with a different state
        # than we enter or not. If so, lie and say we were not in such phase
        # because we need so to raise a new notif
        if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase:
            if self.state != self.state_before_hard_unknown_reach_phase:
                self.was_in_hard_unknown_reach_phase = False

    def consume_result(self, chk, notification_period, hosts, services, timeperiods,
                       macromodulations, checkmodulations, bi_modulations, res_modulations,
                       checks, raise_log):
        # pylint: disable=too-many-locals, too-many-arguments
        # pylint: disable=too-many-branches, too-many-statements
        """Consume a check return and send action in return
        main function of reaction of checks like raise notifications

        Special cases::

        * is_flapping: immediate notification when a problem raises
        * is_in_scheduled_downtime: no notification
        * is_volatile: immediate notification when a problem raises (service only)

        Basically go through all cases (combination of last_state, current_state,
        attempt number) and do necessary actions (add attempt, raise notification.,
        change state type.)
        :param chk: check to handle
        :type chk: alignak.objects.check.Check
        :param notification_period: notification period for this host/service
        :type notification_period: alignak.objects.timeperiod.Timeperiod
        :param hosts: hosts objects, used for almost every operation
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used for almost every operation
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param bi_modulations: business impact modulation are used when setting myself as problem
        :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
        :param res_modulations: result modulation are used to change the ouput of a check
        :type res_modulations: alignak.object.resultmodulation.Resultmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :return: Dependent checks
        :rtype list[alignak.check.Check]
        """
        ok_up = self.__class__.ok_up  # OK for service, UP for host
        now = int(time.time())
        if not chk.freshness_expiry_check:
            self.freshness_expired = False

        if 'ALIGNAK_LOG_ACTIONS' in os.environ:
            if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
                logger.warning("Got check result: %s for %s",
                               chk.exit_status, self.get_full_name())
            else:
                logger.info("Got check result: %s for %s",
                            chk.exit_status, self.get_full_name())

        if os.getenv('ALIGNAK_LOG_CHECKS', None):
            # Map exit status 0..3+ to a log level (anything >= 3 is critical)
            level = ['info', 'warning', 'error', 'critical'][min(chk.exit_status, 3)]
            func = getattr(logger, level)
            func("--ALC-- check result for %s, exit: %d, output: %s",
                 self.get_full_name(), chk.exit_status, chk.output)

        # ============ MANAGE THE CHECK ============ #

        # Check is not OK, waiting to consume the results but it has some dependencies
        # We put this check in waitdep state, and we create the checks of dependent items
        # and nothing else ;)
        if chk.exit_status != 0 and chk.status == ACT_STATUS_WAIT_CONSUME and self.act_depend_of:
            chk.status = ACT_STATUS_WAIT_DEPEND
            # Make sure the check know about his dep
            # C is my check, and he wants dependencies
            deps_checks = self.raise_dependencies_check(chk, hosts, services, timeperiods,
                                                        macromodulations, checkmodulations,
                                                        checks)
            # Get checks_id of dep
            for check in deps_checks['new']:
                chk.depend_on.append(check.uuid)
            for check_uuid in deps_checks['checking']:
                chk.depend_on.append(check_uuid)
            # we must wait dependent check checked and consumed
            return deps_checks['new']

        # We check for stalking if necessary
        # so if check is here
        self.manage_stalking(chk)

        # ============ UPDATE ITEM INFORMATION ============ #

        # Latency can be <0 is we get a check from the retention file
        # so if <0, set 0
        try:
            self.latency = max(0, chk.check_time - chk.t_to_go)
        except TypeError:  # pragma: no cover, simple protection
            pass

        # Ok, the first check is done
        self.has_been_checked = 1

        # Now get data from check
        self.execution_time = chk.execution_time
        self.u_time = chk.u_time
        self.s_time = chk.s_time
        self.last_chk = int(chk.check_time)
        self.output = chk.output
        self.long_output = chk.long_output
        if self.__class__.process_performance_data and self.process_perf_data:
            self.last_perf_data = self.perf_data
            self.perf_data = chk.perf_data

        # Before setting state, modulate them
        for resultmod_id in self.resultmodulations:
            resultmod = res_modulations[resultmod_id]
            if resultmod is not None:
                chk.exit_status = resultmod.module_return(chk.exit_status, timeperiods)

        if not chk.freshness_expiry_check:
            # Only update the last state date if not in freshness expiry
            self.last_state_update = now

        # A host check WARNING result (1) is considered as DOWN (2)
        if chk.exit_status == 1 and self.__class__.my_type == 'host':
            chk.exit_status = 2

        self.set_state_from_exit_status(chk.exit_status, notification_period, hosts, services)

        self.last_state_type = self.state_type
        self.return_code = chk.exit_status

        # Raise the log only when the item information are up-to-date :/
        if raise_log:
            self.raise_check_result()

        # we change the state, do whatever we are or not in
        # an impact mode, we can put it
        self.state_changed_since_impact = True

        # The check is consumed, update the in_checking properties
        self.remove_in_progress_check(chk.uuid)

        # Used to know if a notification is raised or not
        enable_action = True

        # This check was waiting for a check of items it depends
        if chk.status == ACT_STATUS_WAIT_DEPEND:
            # Check dependencies
            enable_action = self.is_enable_action_dependent(hosts, services)
            # If all dependencies not ok, define item as UNREACHABLE
            self.check_and_set_unreachability(hosts, services)

        if chk.status in [ACT_STATUS_WAIT_CONSUME, ACT_STATUS_WAIT_DEPEND]:
            # check waiting consume or waiting result of dependencies
            if chk.depend_on_me != []:
                # one or more checks wait this check (dependency)
                chk.status = ACT_STATUS_WAITING_ME
            else:
                # the check go in zombie state to be removed later
                chk.status = ACT_STATUS_ZOMBIE

        # from UP/OK/PENDING
        # to UP/OK
        if chk.exit_status == 0 and self.last_state in (ok_up, 'PENDING'):
            self.unacknowledge_problem()
            # action in return can be notification or other checks (dependencies)
            if (self.state_type == 'SOFT') and self.last_state != 'PENDING':
                if self.is_max_attempts() and self.state_type == 'SOFT':
                    self.state_type = 'HARD'
                else:
                    self.state_type = 'SOFT'
            else:
                self.attempt = 1
                self.state_type = 'HARD'

        # from WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
        # to UP/OK
        elif chk.exit_status == 0 and self.last_state not in (ok_up, 'PENDING'):
            self.unacknowledge_problem()
            if self.state_type == 'SOFT':
                # previous check in SOFT
                if not chk.is_dependent():
                    self.add_attempt()
                self.raise_alert_log_entry()
                # Eventhandler gets OK;SOFT;++attempt, no notification needed
                self.get_event_handlers(hosts, macromodulations, timeperiods)
                # Now we are UP/OK HARD
                self.state_type = 'HARD'
                self.attempt = 1
            elif self.state_type == 'HARD':
                # previous check in HARD
                self.raise_alert_log_entry()
                # Eventhandler and notifications get OK;HARD;maxattempts
                # Ok, so current notifications are not needed, we 'zombie' them
                self.remove_in_progress_notifications(master=True)
                if enable_action:
                    self.create_notifications('RECOVERY', notification_period, hosts, services)
                self.get_event_handlers(hosts, macromodulations, timeperiods)
                # We stay in HARD
                self.attempt = 1

                # I'm no more a problem if I was one
                self.no_more_a_problem(hosts, services, timeperiods, bi_modulations)

        # Volatile part
        # Only for service
        elif chk.exit_status != 0 and getattr(self, 'is_volatile', False):
            # There are no repeated attempts, so the first non-ok results
            # in a hard state
            self.attempt = 1
            self.state_type = 'HARD'
            # status != 0 so add a log entry (before actions that can also raise log
            # it is smarter to log error before notification)
            self.raise_alert_log_entry()
            self.check_for_flexible_downtime(timeperiods, hosts, services)
            self.remove_in_progress_notifications(master=True)
            if enable_action:
                self.create_notifications('PROBLEM', notification_period, hosts, services)
            # Ok, event handlers here too
            self.get_event_handlers(hosts, macromodulations, timeperiods)

            # PROBLEM/IMPACT
            # I'm a problem only if I'm the root problem,
            if enable_action:
                self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)

        # from UP/OK
        # to WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
        elif chk.exit_status != 0 and self.last_state in (ok_up, 'PENDING'):
            self.attempt = 1
            if self.is_max_attempts():
                # Now we are in HARD
                self.state_type = 'HARD'
                self.raise_alert_log_entry()
                self.remove_in_progress_notifications(master=True)
                self.check_for_flexible_downtime(timeperiods, hosts, services)
                if enable_action:
                    self.create_notifications('PROBLEM', notification_period, hosts, services)
                # Oh? This is the typical go for a event handler :)
                self.get_event_handlers(hosts, macromodulations, timeperiods)

                # PROBLEM/IMPACT
                # I'm a problem only if I'm the root problem,
                if enable_action:
                    self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)
            else:
                # This is the first NON-OK result. Initiate the SOFT-sequence
                # Also launch the event handler, he might fix it.
                self.state_type = 'SOFT'
                if self.is_max_attempts():
                    self.state_type = 'HARD'
                self.raise_alert_log_entry()
                self.get_event_handlers(hosts, macromodulations, timeperiods)

        # from WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
        # to WARNING/CRITICAL/UNKNOWN/UNREACHABLE/DOWN
        elif chk.exit_status != 0 and self.last_state != ok_up:
            if self.state_type == 'SOFT':
                if not chk.is_dependent():
                    self.add_attempt()
                # Cases where go:
                #  * warning soft => critical hard
                #  * warning soft => critical soft
                if self.state != self.last_state:
                    self.unacknowledge_problem_if_not_sticky()
                if self.is_max_attempts():
                    # Ok here is when we just go to the hard state
                    self.state_type = 'HARD'
                    self.raise_alert_log_entry()
                    self.remove_in_progress_notifications(master=True)
                    self.check_for_flexible_downtime(timeperiods, hosts, services)
                    if enable_action:
                        self.create_notifications('PROBLEM', notification_period, hosts, services)
                    # So event handlers here too
                    self.get_event_handlers(hosts, macromodulations, timeperiods)

                    # PROBLEM/IMPACT
                    # I'm a problem only if I'm the root problem,
                    if enable_action:
                        self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)
                else:
                    self.raise_alert_log_entry()
                    # eventhandler is launched each time during the soft state
                    self.get_event_handlers(hosts, macromodulations, timeperiods)
            else:
                # Send notifications whenever the state has changed. (W -> C)
                # but not if the current state is UNKNOWN (hard C-> hard U -> hard C should
                # not restart notifications)
                if self.state != self.last_state:
                    self.update_hard_unknown_phase_state()
                    if not self.in_hard_unknown_reach_phase and not \
                            self.was_in_hard_unknown_reach_phase:
                        self.unacknowledge_problem_if_not_sticky()
                        self.raise_alert_log_entry()
                        self.remove_in_progress_notifications(master=True)
                        if enable_action:
                            self.create_notifications('PROBLEM', notification_period,
                                                      hosts, services)
                        self.get_event_handlers(hosts, macromodulations, timeperiods)

                elif self.in_scheduled_downtime_during_last_check is True:
                    # during the last check I was in a downtime. but now
                    # the status is still critical and notifications
                    # are possible again. send an alert immediately
                    self.remove_in_progress_notifications(master=True)
                    if enable_action:
                        self.create_notifications('PROBLEM', notification_period,
                                                  hosts, services)

                # PROBLEM/IMPACT
                # Forces problem/impact registration even if no state change
                # was detected as we may have a non OK state restored from
                # retention data. This way, we rebuild problem/impact hierarchy.
                # I'm a problem only if I'm the root problem,
                if enable_action:
                    self.set_myself_as_problem(hosts, services, timeperiods, bi_modulations)

                # case no notification exist but notifications are enabled (for example, we
                # enable notifications with external command)
                if enable_action and self.notifications_enabled and \
                        self.current_notification_number == 0:
                    self.remove_in_progress_notifications(master=True)
                    self.create_notifications('PROBLEM', notification_period, hosts, services)

        self.update_hard_unknown_phase_state()
        # Reset this flag. If it was true, actions were already taken
        self.in_scheduled_downtime_during_last_check = False

        # now is the time to update state_type_id
        # and our last_hard_state
        if self.state_type == 'HARD':
            self.state_type_id = 1
            self.last_hard_state = self.state
            self.last_hard_state_id = self.state_id
        else:
            self.state_type_id = 0

        # Fill last_hard_state_change to now
        # if we just change from SOFT->HARD or
        # in HARD we change of state (Warning->critical, or critical->ok, etc etc)
        if self.state_type == 'HARD' and \
                (self.last_state_type == 'SOFT' or self.last_state != self.state):
            self.last_hard_state_change = int(time.time())

        if self.state_type == 'HARD':
            # If the check is a freshness one, set freshness as expired
            if chk.freshness_expiry_check:
                self.freshness_expired = True
                self.last_hard_state_change = int(time.time())

        # update event/problem-counters
        self.update_event_and_problem_id()

        # Raise a log if freshness check expired
        if chk.freshness_expiry_check:
            if os.getenv('ALIGNAK_LOG_CHECKS', None):
                logger.info("--ALC-- freshness expired for %s, when: %s, last checked: %s",
                            self.get_full_name(),
                            datetime.utcfromtimestamp(
                                self.last_hard_state_change).strftime('%Y-%m-%d %H:%M:%S'),
                            datetime.utcfromtimestamp(
                                self.last_state_update).strftime('%Y-%m-%d %H:%M:%S'))
            self.raise_freshness_log_entry(int(now - self.last_state_update -
                                               self.freshness_threshold))

        self.broks.append(self.get_check_result_brok())

        self.get_perfdata_command(hosts, macromodulations, timeperiods)
        # Also snapshot if needed :)
        self.get_snapshot(hosts, macromodulations, timeperiods)

        return []

    def update_event_and_problem_id(self):
        """Update current_event_id and current_problem_id
        Those attributes are used for macros (SERVICEPROBLEMID ...)
        :return: None
        """
        ok_up = self.__class__.ok_up  # OK for service, UP for host
        # A new event is raised on any real state change, or when the very
        # first check result (last_state PENDING) is not OK
        if (self.state != self.last_state and self.last_state != 'PENDING'
                or self.state != ok_up and self.last_state == 'PENDING'):
            SchedulingItem.current_event_id += 1
            self.last_event_id = self.current_event_id
            self.current_event_id = SchedulingItem.current_event_id
            # now the problem_id
            if self.state != ok_up and self.last_state == 'PENDING':
                # broken ever since i can remember
                SchedulingItem.current_problem_id += 1
                self.last_problem_id = self.current_problem_id
                self.current_problem_id = SchedulingItem.current_problem_id
            elif ok_up not in (self.state, self.last_state):
                # State transitions between non-OK states
                # (e.g. WARNING to CRITICAL) do not cause
                # this problem id to increase.
                pass
            elif self.state == ok_up:
                # If the service is currently in an OK state,
                # this macro will be set to zero (0).
                self.last_problem_id = self.current_problem_id
                self.current_problem_id = 0
            else:
                # Every time a service (or host) transitions from
                # an OK or UP state to a problem state, a global
                # problem ID number is incremented by one (1).
                SchedulingItem.current_problem_id += 1
                self.last_problem_id = self.current_problem_id
                self.current_problem_id = SchedulingItem.current_problem_id

    def prepare_notification_for_sending(self, notif, contact, macromodulations, timeperiods,
                                         host_ref):
        """Used by scheduler when a notification is ok to be sent (to reactionner).
        Here we update the command with status of now, and we add the contact to set of
        contact we notified.
        And we raise the log entry

        :param notif: notification to send
        :type notif: alignak.objects.notification.Notification
        :param contact: contact to notify
        :type contact: alignak.object.contact.Contact
        :param macromodulations: Macro modulations objects, used in the notification command
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used to get modulation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param host_ref: reference host (used for a service)
        :type host_ref: alignak.object.host.Host
        :return: None
        """
        # Only notifications that a poller/reactionner grabbed get prepared
        if notif.status == ACT_STATUS_POLLED:
            self.update_notification_command(notif, contact, macromodulations,
                                             timeperiods, host_ref)
            if contact.get_name() not in self.notified_contacts:
                self.notified_contacts.append(contact.get_name())
            if contact.uuid not in self.notified_contacts_ids:
                self.notified_contacts_ids.append(contact.uuid)
            self.raise_notification_log_entry(notif, contact, host_ref)

    def update_notification_command(self, notif, contact, macromodulations, timeperiods,
                                    host_ref=None):
        """Update the notification command by resolving Macros
        And because we are just launching the notification, we can say
        that this contact has been notified

        :param notif: notification to send
        :type notif: alignak.objects.notification.Notification
        :param contact: contact for this host/service
        :type contact: alignak.object.contact.Contact
        :param macromodulations: Macro modulations objects, used in the notification command
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used to get modulation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param host_ref: reference host (used for a service)
        :type host_ref: alignak.object.host.Host
        :return: None
        """
        cls = self.__class__
        macrosolver = MacroResolver()
        data = self.get_data_for_notifications(contact, notif, host_ref)
        notif.command = macrosolver.resolve_command(notif.command_call, data, macromodulations,
                                                    timeperiods)
        # Environment macros are only computed when enabled (they are costly)
        if cls.enable_environment_macros or notif.enable_environment_macros:
            notif.env = macrosolver.get_env_macros(data)

    def is_escalable(self, notification, escalations, timeperiods):
        """Check if a notification can be escalated.
        Basically call is_eligible for each escalation

        :param notification: notification we would like to escalate
        :type notification: alignak.objects.notification.Notification
        :param escalations: Esclations objects, used to get escalation objects (period)
        :type escalations: alignak.objects.escalation.Escalations
        :param timeperiods: Timeperiods objects, used to get escalation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: True if notification can be escalated, otherwise False
        :rtype: bool
        """
        cls = self.__class__

        # We search since when we are in notification for escalations
        # that are based on time
        in_notif_time = time.time() - notification.creation_time

        # Check is an escalation match the current_notification_number
        for escalation_id in self.escalations:
            escalation = escalations[escalation_id]
            escalation_period = timeperiods[escalation.escalation_period]
            if escalation.is_eligible(notification.t_to_go, self.state, notification.notif_nb,
                                      in_notif_time, cls.interval_length, escalation_period):
                return True
        return False

    def get_next_notification_time(self, notif, escalations, timeperiods):
        # pylint: disable=too-many-locals
        """Get the next notification time for a notification
        Take the standard notification_interval or ask for our escalation
        if one of them need a smaller value to escalade

        :param notif: Notification we need time
        :type notif: alignak.objects.notification.Notification
        :param escalations: Esclations objects, used to get escalation objects (interval, period)
        :type escalations: alignak.objects.escalation.Escalations
        :param timeperiods: Timeperiods objects, used to get escalation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: Timestamp of next notification
        :rtype: int
        """
        res = None
        now = time.time()
        cls = self.__class__

        # Look at the minimum notification interval
        notification_interval = self.notification_interval
        # and then look for currently active notifications, and take notification_interval
        # if filled and less than the self value
        in_notif_time = time.time() - notif.creation_time
        for escalation_id in self.escalations:
            escalation = escalations[escalation_id]
            escalation_period = timeperiods[escalation.escalation_period]
            if escalation.is_eligible(notif.t_to_go, self.state, notif.notif_nb,
                                      in_notif_time, cls.interval_length, escalation_period):
                if escalation.notification_interval != -1 and \
                        escalation.notification_interval < notification_interval:
                    notification_interval = escalation.notification_interval

        # So take the by default time
        std_time = notif.t_to_go + notification_interval * cls.interval_length

        # Maybe the notification comes from retention data and
        # next notification alert is in the past
        # if so let use the now value instead
        if std_time < now:
            std_time = now + notification_interval * cls.interval_length

        # standard time is a good one
        res = std_time

        creation_time = notif.creation_time
        in_notif_time = now - notif.creation_time

        for escalation_id in self.escalations:
            escalation = escalations[escalation_id]
            # If the escalation was already raised, we do not look for a new "early start"
            if escalation.get_name() not in notif.already_start_escalations:
                escalation_period = timeperiods[escalation.escalation_period]
                next_t = escalation.get_next_notif_time(std_time, self.state,
                                                        creation_time, cls.interval_length,
                                                        escalation_period)
                # If we got a real result (time base escalation), we add it
                if next_t is not None and now < next_t < res:
                    res = next_t

        # And we take the minimum of this result. Can be standard or escalation asked
        return res

    def get_escalable_contacts(self, notification, escalations, timeperiods):
        """Get all contacts (uniq) from eligible escalations

        :param notification: Notification to get data from (notif number...)
        :type notification: alignak.objects.notification.Notification
        :param escalations: Esclations objects, used to get escalation objects (contact, period)
        :type escalations: alignak.objects.escalation.Escalations
        :param timeperiods: Timeperiods objects, used to get escalation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: Contact uuid list that can be notified for escalation
        :rtype: list
        """
        cls = self.__class__

        # We search since when we are in notification for escalations
        # that are based on this time
        in_notif_time = time.time() - notification.creation_time

        contacts = set()
        # new_escalations = set()

        for escalation_id in self.escalations:
            escalation = escalations[escalation_id]
            escalation_period = timeperiods[escalation.escalation_period]
            if escalation.is_eligible(notification.t_to_go, self.state, notification.notif_nb,
                                      in_notif_time, cls.interval_length, escalation_period):
                contacts.update(escalation.contacts)
                # And we tag this escalations as started now
                if escalation.get_name() not in notification.already_start_escalations:
                    notification.already_start_escalations.append(escalation.get_name())

        return list(contacts)

    def create_notifications(self, n_type, notification_period, hosts, services,
                             t_wished=None, author_data=None):
        """Create a "master" notification here, which will later
        (immediately before the reactionner gets it) be split up
        in many "child" notifications, one for each contact.

        :param n_type: notification type ("PROBLEM", "RECOVERY" ...)
        :type n_type: str
        :param notification_period: notification period for this host/service
        :type notification_period: alignak.objects.timeperiod.Timeperiod
        :param hosts: hosts objects, used to check if a notif is blocked
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to check if a notif is blocked
        :type services: alignak.objects.service.Services
        :param t_wished: time we want to notify
        :type t_wished: int
        :param author_data: notification author data (eg. for a downtime notification)
        :type author_data: dict (containing author, author_name ad a comment)
        :return: None
        """
        cls = self.__class__
        # t_wished==None for the first notification launch after consume
        # here we must look at the self.notification_period
        if t_wished is None:
            t_wished = time.time()
            # if first notification, we must add first_notification_delay
            if self.current_notification_number == 0 and n_type == 'PROBLEM':
                last_time_non_ok_or_up = self.last_time_non_ok_or_up()
                if last_time_non_ok_or_up:
                    # last_time_non_ok_or_up is an integer value - set the next second
                    t_wished = last_time_non_ok_or_up + 1
                t_wished = t_wished + self.first_notification_delay * cls.interval_length

            if notification_period is None:
                new_t = t_wished
            else:
                new_t = notification_period.get_next_valid_time_from_t(t_wished)
        else:
            # We follow our order
            new_t = t_wished

        if self.is_blocking_notifications(notification_period, hosts, services,
                                          n_type, t_wished) and \
                self.first_notification_delay == 0 and self.notification_interval == 0:
            # If notifications are blocked on the host/service level somehow
            # and repeated notifications are not configured,
            # we can silently drop this one
            return

        if n_type == u'PROBLEM':
            # Create the notification with an incremented notification_number.
            # The current_notification_number of the item itself will only
            # be incremented when this notification (or its children)
            # have actually been sent.
            next_notif_nb = self.current_notification_number + 1
        elif n_type == u'RECOVERY':
            # Recovery resets the notification counter to zero
            self.current_notification_number = 0
            next_notif_nb = self.current_notification_number
        else:
            # downtime/flap/etc do not change the notification number
            next_notif_nb = self.current_notification_number

        data = {
            'status': u'scheduled',
            'type': n_type,
            'command': u'VOID',
            'ref': self.uuid,
            't_to_go': new_t,
            'timeout': cls.notification_timeout,
            'notif_nb': next_notif_nb,
            'host_name': getattr(self, 'host_name', ''),
            'service_description': getattr(self, 'service_description', ''),
        }
        if author_data and n_type in [u'DOWNTIMESTART', u'DOWNTIMEEND']:
            data.update(author_data)

        notif = Notification(data)
        logger.debug("Created a %s notification: %s", self.my_type, n_type)

        # Keep a trace in our notifications queue
        self.notifications_in_progress[notif.uuid] = notif

        # and put it in our queue for the scheduler to pick it up
        self.actions.append(notif)

    def scatter_notification(self, notif, contacts, notifways, timeperiods, macromodulations,
                             escalations, host_ref):
        # pylint: disable=too-many-locals, too-many-boolean-expressions, too-many-branches
        """In create_notifications we created a notification master (eg. a template).
        When it's time to hand it over to the reactionner, this master notification needs
        to be split in several child notifications, one for each contact

        To be more exact, one for each contact who is willing to accept
        notifications of this type and at this time

        :param notif: Notification to scatter
        :type notif: alignak.objects.notification.Notification
        :param contacts: Contacts objects, used to retreive contact for this object
        :type contacts: alignak.objects.contact.Contacts
        :param notifways: Notificationway objects, used to get notific commands
        :type notifways: alignak.object.notificationway.Notificationways
        :param timeperiods: Timeperiods objects, used to check if notif are allowed at this time
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in the notification command
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param escalations: Esclations objects, used to get escalated contacts
        :type escalations: alignak.objects.escalation.Escalations
        :param host_ref: reference host (used for a service)
        :type host_ref: alignak.object.host.Host

        :return: child notifications
        :rtype: list[alignak.objects.notification.Notification]
        """
        if notif.contact:
            # only master notifications can be split up
            return []

        cls = self.__class__
        childnotifications = []
        escalated = False
        notification_contacts = []
        if notif.type == u'RECOVERY':
            if self.first_notification_delay != 0 and not self.notified_contacts_ids:
                # Recovered during first_notification_delay. No notifications
                # have been sent yet, so we keep quiet
                notification_contacts = []
            else:
                # The old way. Only send recover notifications to those contacts
                # who also got problem notifications
                notification_contacts = [c_id for c_id in self.notified_contacts_ids]
            self.notified_contacts = []
            self.notified_contacts_ids = []
        else:
            # Check if an escalation match. If so, get all contacts from escalations
            if self.is_escalable(notif, escalations, timeperiods):
                notification_contacts = self.get_escalable_contacts(notif, escalations,
                                                                    timeperiods)
                escalated = True
            # else take normal contacts
            else:
                # notif_contacts = [contacts[c_id] for c_id in self.contacts]
                notification_contacts = self.contacts

        recipients = []
        recipients_names = set()
        for contact_uuid in notification_contacts:
            # We do not want to notify again a contact with notification interval == 0
            # if has been already notified except if the item hard state changed!
            # This can happen when a service exits a downtime and it is still in
            # critical/warning (and not acknowledge)
            if notif.type == u'PROBLEM' and self.notification_interval == 0 \
                    and self.state_type == 'HARD' and self.last_state_type == self.state_type \
                    and self.state == self.last_state \
                    and contact_uuid in self.notified_contacts_ids:
                # Do not send notification
                continue
            recipients.append(contact_uuid)
            recipients_names.add(contacts[contact_uuid].contact_name)

        for contact_uuid in recipients:
            contact = contacts[contact_uuid]

            # Get the property name for notification commands, like
            # service_notification_commands for service
            notif_commands = contact.get_notification_commands(notifways, cls.my_type)

            # One child notification per notification command of the contact
            for cmd in notif_commands:
                data = {
                    'type': notif.type,
                    'command': u'VOID',
                    'command_call': cmd,
                    'ref': self.uuid,
                    'contact': contact.uuid,
                    'contact_name': contact.contact_name,
                    'recipients': ','.join(recipients_names),
                    't_to_go': notif.t_to_go,
                    'escalated': escalated,
                    'timeout': cls.notification_timeout,
                    'notif_nb': notif.notif_nb,
                    'reactionner_tag': cmd.reactionner_tag,
                    'enable_environment_macros': cmd.enable_environment_macros,
                    'host_name': getattr(self, 'host_name', ''),
                    'service_description': getattr(self, 'service_description', ''),
                    'author': notif.author,
                    'author_name': notif.author_name,
                    'author_alias': notif.author_alias,
                    'author_comment': notif.author_comment
                }
                child_n = Notification(data)
                if not self.notification_is_blocked_by_contact(notifways, timeperiods, child_n,
                                                               contact):
                    # Update the notification with fresh status information
                    # of the item. Example: during the notification_delay
                    # the status of a service may have changed from WARNING to CRITICAL
                    self.update_notification_command(child_n, contact, macromodulations,
                                                     timeperiods, host_ref)
                    self.raise_notification_log_entry(child_n, contact, host_ref)
                    self.notifications_in_progress[child_n.uuid] = child_n
                    childnotifications.append(child_n)

                    if notif.type == u'PROBLEM':
                        # Remember the contacts. We might need them later in the
                        # recovery code some lines above
                        if contact.uuid not in self.notified_contacts_ids:
                            self.notified_contacts_ids.append(contact.uuid)
                        if contact.get_name() not in self.notified_contacts:
                            self.notified_contacts.append(contact.get_name())

        return childnotifications

    def launch_check(self, timestamp, hosts, services, timeperiods, macromodulations,
                     checkmodulations, checks, ref_check=None, force=False, dependent=False):
        # pylint: disable=too-many-locals, too-many-arguments
        # pylint: disable=too-many-branches, too-many-return-statements
        """Launch a check (command)

        :param hosts: list of all hosts
        :param services: list of all services
        :param timeperiods: list of all time periods
        :param checks: list of all current checks
        :param timestamp:
        :type timestamp: int
        :param macromodulations: list of all macro modulations
        :param checkmodulations: Checkmodulations objects, used to change check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param ref_check:
        :type ref_check:
        :param force:
        :type force: bool
        :param dependent:
        :type dependent: bool
        :return: None or alignak.check.Check
        :rtype: None | alignak.check.Check
        """
        chk = None
        cls = self.__class__

        # Look if we are in check or not
        self.update_in_checking()

        # the check is being forced, so we just replace next_chk time by now
        if force and self.in_checking:
            try:
                c_in_progress = checks[self.checks_in_progress[0]]
c_in_progress.t_to_go = time.time() return c_in_progress except KeyError: pass # If I'm already in checking, Why launch a new check? # If ref_check_id is not None , this is a dependency_ check # If none, it might be a forced check, so OK, I do a new # Dependency check, we have to create a new check that will be launched only once (now) # Otherwise it will delay the next real check. this can lead to an infinite SOFT state. if not force and (self.in_checking and ref_check is not None): c_in_progress = checks[self.checks_in_progress[0]] # todo: create a copy function in the Check class # c_in_progress has almost everything we need but we cant copy.deepcopy() it # we need another c.uuid data = { 'command': c_in_progress.command, 'timeout': c_in_progress.timeout, 'poller_tag': c_in_progress.poller_tag, 'env': c_in_progress.env, 'module_type': c_in_progress.module_type, 't_to_go': timestamp, 'depend_on_me': [ref_check], 'ref': self.uuid, 'ref_type': self.my_type, 'dependency_check': True, 'internal': self.got_business_rule or c_in_progress.command.startswith('_') } chk = Check(data) self.actions.append(chk) if os.getenv('ALIGNAK_LOG_CHECKS', None): logger.info("--ALC-- -> added a check action for %s (%s)", self.get_full_name(), chk.uuid) return chk if force or (not self.is_no_check_dependent(hosts, services, timeperiods)): if self.my_type == 'service' and not self.check_command: # This should never happen because of configuration check! 
logger.debug("Service check is for a service that has no check command (%s/%s), " "do not launch the check !", self.host_name, self.service_description) return None if self.my_type == 'host' and not self.check_command: if self.active_checks_enabled: logger.debug("Host check is for an host that has no check command (%s), " "do not launch the check !", self.host_name) return None # Fred : passive only checked host dependency if dependent and self.my_type == 'host' and \ self.passive_checks_enabled and not self.active_checks_enabled: logger.debug("Host check (dependent) is for an host that is only passively " "checked (%s), do not launch the check !", self.host_name) return None # By default env is void env = {} poller_tag = u'None' module_type = None # By default we will use our default check_command self.last_check_command = None check_command = self.check_command command_line = '' if check_command: poller_tag = check_command.poller_tag module_type = check_command.module_type # But if a checkway is available, use this one instead. 
# Take the first available for chkmod_id in self.checkmodulations: chkmod = checkmodulations[chkmod_id] c_cw = chkmod.get_check_command(timeperiods, timestamp) if c_cw: check_command = c_cw break # Get the command to launch mr = MacroResolver() data = self.get_data_for_checks(hosts) command_line = mr.resolve_command(check_command, data, macromodulations, timeperiods) # remember it, for pure debugging purpose self.last_check_command = command_line # And get all environment variables only if needed if cls.enable_environment_macros or (check_command and check_command.enable_environment_macros): env = mr.get_env_macros(data) # By default we take the global timeout, but we use the command one if it # is defined (default is -1 for no timeout) timeout = cls.check_timeout if check_command and check_command.timeout != -1: timeout = check_command.timeout # Build the Check object and put the service in checking data = { 'command': command_line, 'timeout': timeout, 'poller_tag': poller_tag, 'env': env, 'module_type': module_type, 't_to_go': timestamp, 'depend_on_me': [ref_check] if ref_check else [], 'ref': self.uuid, 'ref_type': self.my_type, 'internal': self.got_business_rule or command_line.startswith('_') } chk = Check(data) self.checks_in_progress.append(chk.uuid) self.update_in_checking() # We need to put this new check in our actions queue # so scheduler can take it if chk is not None: self.actions.append(chk) if os.getenv('ALIGNAK_LOG_CHECKS', None): logger.info("--ALC-- -> added a check action for %s (%s)", self.get_full_name(), chk.uuid) return chk # None mean I already take it into account return None def get_time_to_orphanage(self): """Get time to orphanage :: * 0 : don't check for orphans * non zero : number of secs that can pass before marking the check an orphan. 
:return: integer with the meaning explained above :rtype: int """ # if disabled program-wide, disable it if not self.check_for_orphaned: return 0 # otherwise, check what my local conf says if self.time_to_orphanage <= 0: return 0 return self.time_to_orphanage def get_perfdata_command(self, hosts, macromodulations, timeperiods): """Add event_handler to process performance data if necessary (not disabled) :param macromodulations: Macro modulations objects, used in commands (notif, check) :type macromodulations: alignak.objects.macromodulation.Macromodulations :return: None """ cls = self.__class__ if not cls.process_performance_data or not self.process_perf_data: return if cls.perfdata_command: mr = MacroResolver() data = self.get_data_for_event_handler(hosts) cmd = mr.resolve_command(cls.perfdata_command, data, macromodulations, timeperiods) reactionner_tag = cls.perfdata_command.reactionner_tag event_h = EventHandler({ 'command': cmd, 'timeout': cls.perfdata_timeout, 'ref': self.uuid, 'reactionner_tag': reactionner_tag }) # ok we can put it in our temp action queue self.actions.append(event_h) def create_business_rules(self, hosts, services, hostgroups, servicegroups, macromodulations, timeperiods, running=False): # pylint: disable=too-many-locals """Create business rules if necessary (cmd contains bp_rule) :param hosts: Hosts object to look for objects :type hosts: alignak.objects.host.Hosts :param services: Services object to look for objects :type services: alignak.objects.service.Services :param running: flag used in eval_cor_pattern function :type running: bool :return: None """ cmd_call = getattr(self, 'check_command', None) if cmd_call is None: return # we get our base command, like # bp_rule!(host,svc & host, svc) -> bp_rule # cmd = cmd_call.call # elts = cmd.split('!') # base_cmd = elts[0] if cmd_call.command_name not in ['bp_rule']: return # If it's bp_rule, we got a rule :) self.got_business_rule = True rule = '' if cmd_call.args: rule = 
'!'.join(cmd_call.args) # Only (re-)evaluate the business rule if it has never been # evaluated before, or it contains a macro. if self.business_rule is None or re.match(r"\$[\w\d_-]+\$", rule): mr = MacroResolver() rule = mr.resolve_simple_macros_in_string(rule, self.get_data_for_checks(hosts), macromodulations, timeperiods) if rule == getattr(self, "processed_business_rule", None): # Business rule did not changed (no macro was modulated) return self.processed_business_rule = rule fact = DependencyNodeFactory(self) self.business_rule = fact.eval_cor_pattern(rule, hosts, services, hostgroups, servicegroups, running) def get_business_rule_output(self, hosts, services, macromodulations, timeperiods): # pylint: disable=too-many-locals, too-many-branches """ Returns a status string for business rules based items formatted using business_rule_output_template attribute as template. The template may embed output formatting for itself, and for its child (dependent) items. Child format string is expanded into the $( and )$, using the string between brackets as format string. Any business rule based item or child macro may be used. In addition, the $STATUS$, $SHORTSTATUS$ and $FULLNAME$ macro which name is common to hosts and services may be used to ease template writing. Caution: only children in state not OK are displayed. 
Example: A business rule with a format string looking like "$STATUS$ [ $($TATUS$: $HOSTNAME$,$SERVICEDESC$ )$ ]" Would return "CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2 ]" :param hosts: Hosts object to look for objects :type hosts: alignak.objects.host.Hosts :param services: Services object to look for objects :type services: alignak.objects.service.Services :param macromodulations: Macromodulations object to look for objects :type macromodulations: alignak.objects.macromodulation.Macromodulations :param timeperiods: Timeperiods object to look for objects :type timeperiods: alignak.objects.timeperiod.Timeperiods :return: status for business rules :rtype: str """ got_business_rule = getattr(self, 'got_business_rule', False) # Checks that the service is a business rule. if got_business_rule is False or self.business_rule is None: return "" # Checks that the business rule has a format specified. output_template = self.business_rule_output_template if not output_template: return "" mr = MacroResolver() # Extracts children template strings elts = re.findall(r"\$\((.*)\)\$", output_template) if not elts: child_template_string = "" else: child_template_string = elts[0] # Processes child services output children_output = "" ok_count = 0 # Expands child items format string macros. items = self.business_rule.list_all_elements() for item_uuid in items: if item_uuid in hosts: item = hosts[item_uuid] elif item_uuid in services: item = services[item_uuid] # Do not display children in OK state # todo: last_hard_state ? why not current state if state type is hard ? if item.last_hard_state_id == 0: ok_count += 1 continue data = item.get_data_for_checks(hosts) children_output += mr.resolve_simple_macros_in_string(child_template_string, data, macromodulations, timeperiods) if ok_count == len(items): children_output = "all checks were successful." 
# Replaces children output string template_string = re.sub(r"\$\(.*\)\$", children_output, output_template) data = self.get_data_for_checks(hosts) output = mr.resolve_simple_macros_in_string(template_string, data, macromodulations, timeperiods) return output.strip() def business_rule_notification_is_blocked(self, hosts, services): # pylint: disable=too-many-locals """Process business rule notifications behaviour. If all problems have been acknowledged, no notifications should be sent if state is not OK. By default, downtimes are ignored, unless explicitly told to be treated as acknowledgements through with the business_rule_downtime_as_ack set. :return: True if all source problem are acknowledged, otherwise False :rtype: bool """ # Walks through problems to check if all items in non ok are # acknowledged or in downtime period. acknowledged = 0 for src_prob_id in self.source_problems: if src_prob_id in hosts: src_prob = hosts[src_prob_id] else: src_prob = services[src_prob_id] if src_prob.last_hard_state_id != 0: if src_prob.problem_has_been_acknowledged: # Problem hast been acknowledged acknowledged += 1 # Only check problems under downtime if we are # explicitly told to do so. 
elif self.business_rule_downtime_as_ack is True: if src_prob.scheduled_downtime_depth > 0: # Problem is under downtime, and downtimes should be # treated as acknowledgements acknowledged += 1 elif hasattr(src_prob, "host") and \ hosts[src_prob.host].scheduled_downtime_depth > 0: # Host is under downtime, and downtimes should be # treated as acknowledgements acknowledged += 1 return acknowledged == len(self.source_problems) def manage_internal_check(self, hosts, services, check, hostgroups, servicegroups, macromodulations, timeperiods): # pylint: disable=too-many-branches, too-many-statements, too-many-locals """Manage internal commands such as :: * bp_rule * _internal_host_up * _echo :param hosts: Used to create business rules :type hosts: alignak.objects.host.Hosts :param services: Used to create business rules :type services: alignak.objects.service.Services :param check: internal check to manage :type check: alignak.objects.check.Check :return: None """ logger.debug("Internal check: %s - %s", self.get_full_name(), check.command) if os.getenv('ALIGNAK_LOG_CHECKS', None): logger.info("--ALC-- Internal check: %s - %s", self.get_full_name(), check.command) # Default is unknown state state = 3 # Business rule if check.command.startswith('bp_'): try: # Re evaluate the business rule to take into account macro modulation. # Caution: We consider that the macro modulation did not # change business rule dependency tree. Only Xof: values should # be modified by modulation. 
self.create_business_rules(hosts, services, hostgroups, servicegroups, macromodulations, timeperiods, running=True) state = self.business_rule.get_state(hosts, services) check.output = self.get_business_rule_output(hosts, services, macromodulations, timeperiods) if os.getenv('ALIGNAK_LOG_ACTIONS', None): log_function = logger.info if os.getenv('ALIGNAK_LOG_ACTIONS') == 'WARNING': log_function = logger.warning log_function("Resolved BR for '%s', output: %s", self.get_full_name(), check.output) except Exception as exp: # pylint: disable=broad-except # Notifies the error, and return an UNKNOWN state. check.output = u"Error while re-evaluating business rule: %s" % exp logger.debug("[%s] Error while re-evaluating business rule:\n%s", self.get_name(), traceback.format_exc()) state = 3 # _internal_host_up is for putting host as UP elif check.command == '_internal_host_up': state = 0 check.execution_time = 0 check.output = u'Host assumed to be UP' if os.getenv('ALIGNAK_LOG_ACTIONS', None): log_function = logger.info if os.getenv('ALIGNAK_LOG_ACTIONS') == 'WARNING': log_function = logger.warning log_function("Set host %s as UP (internal check)", self.get_full_name()) # Echo is just putting the same state again elif check.command == '_echo': state = self.state_id check.execution_time = 0 check.output = self.output if os.getenv('ALIGNAK_LOG_ACTIONS', None): log_function = logger.info if os.getenv('ALIGNAK_LOG_ACTIONS') == 'WARNING': log_function = logger.warning log_function("Echo the current state (%s - %d) for %s ", self.state, self.state_id, self.get_full_name()) # _internal_host_check is for having an host check result # without running a check plugin elif check.command.startswith('_internal_host_check'): # Command line contains: state_id;output check_result = check.command.split(';') if len(check_result) < 2: state = 3 check.output = u'Malformed host internal check' else: # In SOFT state type, do not change current state - let the new state go to HARD if self.state_type 
== 'SOFT': state = self.state_id else: state = check_result[1].strip() # If multiple possible states - choose a random one if ',' in check_result[1]: states = check_result[1].split(',') if len(states) > 5: states = states[0:5] probability = { 2: [0.8, 0.2], 3: [0.7, 0.2, 0.1], 4: [0.6, 0.2, 0.1, 0.1], 5: [0.4, 0.2, 0.2, 0.1, 0.1] } probability = probability.get(len(states)) try: state = numpy.random.choice(states, p=probability) except Exception: # pylint: disable=broad-except # If random configuration error, do not change the state logger.warning("Randomly chosen state is not configured correctly " "for %s: %s", self.get_full_name(), state) # state = self.state_id state = random.choice(states) try: state = int(state) except ValueError: pass check.output = u'Host internal check result: %d' % state if len(check_result) > 2 and check_result[2]: check.output = check_result[2] if '%d' in check.output: check.output = check.output % state check.execution_time = 0 if os.getenv('ALIGNAK_INTERNAL_HOST_PERFDATA', None): try: max_range = int(os.getenv('ALIGNAK_INTERNAL_HOST_PERFDATA')) except ValueError: max_range = 10 check.perf_data = "'rnd_metric'=%d" % random.randint(0, max_range) if os.getenv('ALIGNAK_LOG_ACTIONS', None): log_function = logger.info if os.getenv('ALIGNAK_LOG_ACTIONS') == 'WARNING': log_function = logger.warning log_function("Host %s internal check: %d - %s", self.get_full_name(), state, check.output) # _internal_service_check is for having a service check result # without running a check plugin elif check.command.startswith('_internal_service_check'): # Command line contains: state_id;output check_result = check.command.split(';') if not check_result: state = 3 check.output = u'Malformed service internal check' else: # In SOFT state type, do not change current state - let the new state go to HARD if self.state_type == 'SOFT': state = self.state_id else: state = check_result[1].strip() # If multiple possible states - choose a random one if ',' in 
check_result[1]: states = check_result[1].split(',') if len(states) > 5: states = states[0:5] probability = { 2: [0.8, 0.2], 3: [0.7, 0.2, 0.1], 4: [0.6, 0.2, 0.1, 0.1], 5: [0.4, 0.2, 0.2, 0.1, 0.1] } probability = probability.get(len(states)) try: state = numpy.random.choice(states, p=probability) except Exception as exp: # pylint: disable=broad-except # If random configuration error, do not change the state logger.warning("Randomly chosen state is not configured correctly " "for %s: %s", self.get_full_name(), state) state = self.state_id try: state = int(state) except ValueError: pass check.output = u'Service internal check result: %d' % state if len(check_result) > 2 and check_result[2]: check.output = check_result[2] if '%d' in check.output: check.output = check.output % state check.execution_time = 0 if os.getenv('ALIGNAK_INTERNAL_SERVICE_PERFDATA', None): try: max_range = int(os.getenv('ALIGNAK_INTERNAL_SERVICE_PERFDATA')) except ValueError: max_range = 10 check.perf_data = "'rnd_metric'=%d" % random.randint(0, max_range) if os.getenv('ALIGNAK_LOG_ACTIONS', None): log_function = logger.info if os.getenv('ALIGNAK_LOG_ACTIONS') == 'WARNING': log_function = logger.warning log_function("Service %s internal check: %d - %s", self.get_full_name(), state, check.output) check.long_output = check.output check.check_time = time.time() check.exit_status = state def fill_data_brok_from(self, data, brok_type): """Fill data brok dependent on the brok_type :param data: data to fill :type data: dict :param brok_type: brok type :type: str :return: None """ super(SchedulingItem, self).fill_data_brok_from(data, brok_type) # workaround/easy trick to have the command_name of this # SchedulingItem in its check_result brok if brok_type == CHECK_RESULT: data['command_name'] = '' if self.check_command: data['command_name'] = self.check_command.command.command_name def acknowledge_problem(self, notification_period, hosts, services, sticky, notify, author, comment, end_time=0): # 
pylint: disable=too-many-arguments """ Add an acknowledge :param sticky: acknowledge will be always present is host return in UP state :type sticky: integer :param notify: if to 1, send a notification :type notify: integer :param author: name of the author or the acknowledge :type author: str :param comment: comment (description) of the acknowledge :type comment: str :param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end) :type end_time: int :return: None | alignak.comment.Comment """ comm = None logger.debug("Acknowledge requested for %s %s.", self.my_type, self.get_name()) if self.state != self.ok_up: # case have yet an acknowledge if self.problem_has_been_acknowledged and self.acknowledgement: self.del_comment(getattr(self.acknowledgement, 'comment_id', None)) if notify: self.create_notifications('ACKNOWLEDGEMENT', notification_period, hosts, services) self.problem_has_been_acknowledged = True sticky = sticky == 2 data = { 'ref': self.uuid, 'sticky': sticky, 'author': author, 'comment': comment, 'end_time': end_time, 'notify': notify } self.acknowledgement = Acknowledge(data) if self.my_type == 'host': comment_type = 1 self.broks.append(self.acknowledgement.get_raise_brok(self.get_name())) else: comment_type = 2 self.broks.append(self.acknowledgement.get_raise_brok(self.host_name, self.get_name())) data = { 'author': author, 'comment': comment, 'comment_type': comment_type, 'entry_type': 4, 'source': 0, 'expires': False, 'ref': self.uuid } comm = Comment(data) self.acknowledgement.comment_id = comm.uuid self.comments[comm.uuid] = comm self.broks.append(self.get_update_status_brok()) self.raise_acknowledge_log_entry() else: logger.debug("Acknowledge requested for %s %s but element state is OK/UP.", self.my_type, self.get_name()) # For an host, acknowledge all its services that are problems if self.my_type == 'host': for service_uuid in self.services: if service_uuid not in services: continue 
services[service_uuid].acknowledge_problem(notification_period, hosts, services, sticky, notify, author, comment, end_time) return comm def check_for_expire_acknowledge(self): """ If have acknowledge and is expired, delete it :return: None """ if (self.acknowledgement and self.acknowledgement.end_time != 0 and self.acknowledgement.end_time < time.time()): self.unacknowledge_problem() def unacknowledge_problem(self): """ Remove the acknowledge, reset the flag. The comment is deleted :return: None """ if self.problem_has_been_acknowledged: logger.debug("[item::%s] deleting acknowledge of %s", self.get_name(), self.get_full_name()) self.problem_has_been_acknowledged = False if self.my_type == 'host': self.broks.append(self.acknowledgement.get_expire_brok(self.get_name())) else: self.broks.append(self.acknowledgement.get_expire_brok(self.host_name, self.get_name())) # delete the comment of the item related with the acknowledge if hasattr(self.acknowledgement, 'comment_id') and \ self.acknowledgement.comment_id in self.comments: del self.comments[self.acknowledgement.comment_id] # Should not be deleted, a None is Good self.acknowledgement = None self.broks.append(self.get_update_status_brok()) self.raise_unacknowledge_log_entry() def unacknowledge_problem_if_not_sticky(self): """ Remove the acknowledge if it is not sticky :return: None """ if hasattr(self, 'acknowledgement') and self.acknowledgement is not None: if not self.acknowledgement.sticky: self.unacknowledge_problem() def raise_check_result(self): # pragma: no cover, base function """Raise ACTIVE CHECK RESULT entry Function defined in inherited objects (Host and Service) :return: None """ pass def raise_alert_log_entry(self): # pragma: no cover, base function """Raise ALERT entry Function defined in inherited objects (Host and Service) :return: None """ pass def raise_acknowledge_log_entry(self): # pragma: no cover, base function """Raise ACKNOWLEDGE STARTED entry Function defined in inherited objects (Host and 
Service) :return: None """ pass def raise_unacknowledge_log_entry(self): # pragma: no cover, base function """Raise ACKNOWLEDGE STOPPED entry Function defined in inherited objects (Host and Service) :return: None """ pass def is_state(self, status): # pragma: no cover, base function """Return if status match the current item status :param status: status to compare. Usually comes from config files :type status: str :return: True :rtype: bool """ pass def raise_freshness_log_entry(self, t_stale_by): """Raise freshness alert entry (warning level) Example : "The freshness period of host 'host_name' is expired by 0d 0h 17m 6s (threshold=0d 1h 0m 0s). Attempt: 1 / 1. I'm forcing the state to freshness state (d / HARD)" :param t_stale_by: time in seconds the host has been in a stale state :type t_stale_by: int :return: None """ logger.warning("The freshness period of %s '%s' is expired by %ss " "(threshold=%ss + %ss). Attempt: %s / %s. " "I'm forcing the state to freshness state (%s / %s).", self.my_type, self.get_full_name(), t_stale_by, self.freshness_threshold, self.additional_freshness_latency, self.attempt, self.max_check_attempts, self.freshness_state, self.state_type) def raise_snapshot_log_entry(self, command): # pragma: no cover, base function """Raise item SNAPSHOT entry (critical level) Format is : "ITEM SNAPSHOT: *self.get_name()*;*state*;*state_type*;*attempt*; *command.get_name()*" Example : "HOST SNAPSHOT: server;UP;HARD;1;notify-by-rss" :param command: Snapshot command launched :type command: alignak.objects.command.Command :return: None """ pass def raise_flapping_start_log_entry(self, change_ratio, threshold): # pragma: no cover, # base function """Raise FLAPPING ALERT START entry (critical level) :param change_ratio: percent of changing state :type change_ratio: float :param threshold: threshold (percent) to trigger this log entry :type threshold: float :return: None """ pass def raise_event_handler_log_entry(self, command): # pragma: no cover, base 
function """Raise EVENT HANDLER entry (critical level) :param command: Handler launched :type command: alignak.objects.command.Command :return: None """ pass def raise_flapping_stop_log_entry(self, change_ratio, threshold): # pragma: no cover, # base function """Raise FLAPPING ALERT STOPPED entry (critical level) :param change_ratio: percent of changing state :type change_ratio: float :param threshold: threshold (percent) to trigger this log entry :type threshold: float :return: None """ pass def raise_notification_log_entry(self, notif, contact, host_ref): # pragma: no cover, # base function """Raise NOTIFICATION entry (critical level) :param notif: notification object created by service alert :type notif: alignak.objects.notification.Notification :return: None """ pass # pylint: disable=unused-argument def get_data_for_checks(self, hosts): """Get data for a check :return: list containing a single host (this one) :rtype: list """ return [self] # pylint: disable=unused-argument def get_data_for_event_handler(self, hosts): """Get data for an event handler :return: list containing a single host (this one) :rtype: list """ return [self] # pylint: disable=unused-argument def get_data_for_notifications(self, contact, notif, host_ref): """Get data for a notification :param contact: The contact to return :type contact: :param notif: the notification to return :type notif: :return: list containing the host and the given parameters :rtype: list """ return [self, contact, notif] def set_impact_state(self): """We just go an impact, so we go unreachable But only if we enable this state change in the conf :return: None """ cls = self.__class__ if cls.enable_problem_impacts_states_change: logger.debug("%s is impacted and goes UNREACHABLE", self) # Track the old state (problem occured before a new check) self.state_before_impact = self.state self.state_id_before_impact = self.state_id # This flag will know if we override the impact state self.state_changed_since_impact = False # 
Set unreachable self.set_unreachable() def unset_impact_state(self): """Unset impact, only if impact state change is set in configuration :return: None """ cls = self.__class__ if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact: self.state = self.state_before_impact self.state_id = self.state_id_before_impact def set_unreachable(self): """Set unreachable: all our parents (dependencies) are not ok Unreachable is different from down/critical :return:None """ self.state_id = 4 self.state = u'UNREACHABLE' self.last_time_unreachable = int(time.time()) def manage_stalking(self, check): # pragma: no cover, base function """Check if the item need stalking or not (immediate recheck) :param check: finished check (check.status == 'waitconsume') :type check: alignak.check.Check :return: None """ pass def set_state_from_exit_status(self, status, notif_period, hosts, services): """Set the state with the status of a check. Also update last_state :param status: integer between 0 and 3 :type status: int :param hosts: hosts objects, used for almost every operation :type hosts: alignak.objects.host.Hosts :param services: services objects, used for almost every operation :type services: alignak.objects.service.Services :return: None """ pass def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished): # pragma: no cover, base function """Check if a notification is blocked by item :param n_type: notification type :type n_type: :param t_wished: the time we should like to notify the host (mostly now) :type t_wished: float :return: True if ONE of the above condition was met, otherwise False :rtype: bool """ pass def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact): # pragma: no cover, base function """Check if the notification is blocked by this contact. 
:param notif: notification created earlier :type notif: alignak.notification.Notification :param contact: contact we want to notify :type notif: alignak.objects.contact.Contact :return: True if the notification is blocked, False otherwise :rtype: bool """ pass def is_correct(self): # pylint: disable=too-many-branches """ Check if this object configuration is correct :: * Check our own specific properties * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False :rtype: bool """ if hasattr(self, 'trigger') and getattr(self, 'trigger', None): self.add_warning("'trigger' property is not allowed") # If no notif period, set it to None, mean 24x7 if not hasattr(self, 'notification_period'): self.notification_period = None # If freshness_threshold is not set, use check interval or retry interval if hasattr(self, 'freshness_threshold') and not self.freshness_threshold: if getattr(self, 'check_interval', 0): self.freshness_threshold = self.check_interval * 60 elif getattr(self, 'retry_interval', 0): self.freshness_threshold = self.retry_interval * 60 # If we got an event handler, it should be valid if getattr(self, 'event_handler', None) and not self.event_handler.is_valid(): self.add_error("event_handler '%s' is invalid" % self.event_handler.command) if not hasattr(self, 'check_command'): # todo: This should never happen because the default exists as an empty string self.add_error("no check_command property") # Ok got a command, but maybe it's invalid else: # if not self.check_command: # self.add_warning("[%s::%s] no check_command, will always be considered as Up" # % (self.my_type, self.get_name())) if self.check_command and not self.check_command.is_valid(): self.add_error("check_command '%s' invalid" % self.check_command.command) # state = False if self.got_business_rule: if not self.business_rule.is_valid(): self.add_error("business_rule invalid") for business_rule_error in self.business_rule.configuration_errors: 
self.add_error("%s" % business_rule_error) if not hasattr(self, 'notification_interval') \ and getattr(self, 'notifications_enabled', None) is True: self.add_error("no notification_interval but notifications enabled") # if no check_period, means 24x7, like for services if not hasattr(self, 'check_period'): self.check_period = None return super(SchedulingItem, self).is_correct() and self.conf_is_correct
class SchedulingItem(Item):
    """SchedulingItem class provides methods for the Scheduler to handle
    Service or Host objects"""

    def __init__(self, params, parsing=True):
        pass

    @property
    def monitored(self):
        """Simple property renaming for better API;)"""
        pass

    @property
    def last_check(self):
        """Simple property renaming for better API;)"""
        pass

    @property
    def next_check(self):
        """Simple property renaming for better API;)"""
        pass

    @property
    def acknowledged(self):
        """Simple property renaming for better API;)"""
        pass

    @property
    def downtimed(self):
        """Simple property renaming for better API;)"""
        pass

    def get_check_command(self):
        """Wrapper to get the name of the check_command attribute

        :return: check_command name
        :rtype: str
        """
        pass

    def change_check_command(self, command, commands):
        """Change the check command

        :param command: the new command
        :type command: str
        :param commands: the available command items
        :type commands: alignak.objects.command.Commands
        :return: None
        """
        pass

    def change_event_handler(self, command, commands):
        """Change the event handler command

        :param command: the new command
        :type command: str
        :param commands: the available command items
        :type commands: alignak.objects.command.Commands
        :return: None
        """
        pass

    def change_snapshot_command(self, command, commands):
        """Change the snapshot command

        :param command: the new command
        :type command: str
        :param commands: the available command items
        :type commands: alignak.objects.command.Commands
        :return: None
        """
        pass

    def add_flapping_change(self, sample):
        """Add a flapping sample and keep cls.flap_history samples

        :param sample: Sample to add
        :type sample: bool
        :return: None
        """
        pass

    def update_flapping(self, notif_period, hosts, services):
        """Compute the sample list (self.flapping_changes) and determine
        whether the host/service is flapping or not

        :param notif_period: notification period object for this host/service
        :type notif_period: alignak.object.timeperiod.Timeperiod
        :param hosts: Hosts objects, used to create notification if necessary
        :type hosts: alignak.objects.host.Hosts
        :param services: Services objects, used to create notification if necessary
        :type services: alignak.objects.service.Services
        :return: None
        :rtype: Nonetype
        """
        pass

    def add_attempt(self):
        """Add an attempt when a object is a non-ok state

        :return: None
        """
        pass

    def is_max_attempts(self):
        """Check if max check attempt is reached

        :return: True if self.attempt >= self.max_check_attempts, otherwise False
        :rtype: bool
        """
        pass

    def do_check_freshness(self, hosts, services, timeperiods, macromodulations,
                           checkmodulations, checks, when):
        """Check freshness and schedule a check now if necessary.

        This function is called by the scheduler if Alignak is configured to check
        the freshness.

        It is called for hosts that have the freshness check enabled if they are
        only passively checked.

        It is called for services that have the freshness check enabled if they are
        only passively checked and if their depending host is not in a freshness
        expired state (freshness_expiry = True).

        A log is raised when the freshness expiry is detected and the item is set
        as freshness_expiry.

        :param hosts: hosts objects, used to launch checks
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used launch checks
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used to get check_period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :return: A check or None
        :rtype: None | object
        """
        pass

    def set_myself_as_problem(self, hosts, services, timeperiods, bi_modulations):
        """Raise all impact from my error. I'm setting myself as a problem, and
        I register myself as this in all hosts/services that depend_on_me.
        So they are now my impacts.

        :param hosts: hosts objects, used to get impacts
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get impacts
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used to get act_depend_of_me timeperiod
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param bi_modulations: business impact modulations objects
        :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
        :return: None
        """
        pass

    def update_business_impact_value(self, hosts, services, timeperiods, bi_modulations):
        """We update our 'business_impact' value with the max of the impacts
        business_impact if we got impacts. And save our 'configuration'
        business_impact if we do not have do it before.
        If we do not have impacts, we revert our value.

        :param hosts: hosts objects, used to get impacts
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get impacts
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used to get modulation_period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param bi_modulations: business impact modulations objects
        :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
        :return: None
        """
        pass

    def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations):
        """Remove this objects as an impact for other schedulingitem.

        :param hosts: hosts objects, used to get impacts
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get impacts
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for update_business_impact_value
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param bi_modulations: business impact modulation are used when setting myself as problem
        :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
        :return: None
        """
        pass

    def register_a_problem(self, prob, hosts, services, timeperiods, bi_modulations):
        """Call recursively by potentials impacts so they update their
        source_problems list. But do not go below if the problem is not a real
        one for me, like if I've got multiple parents for example.

        :param prob: problem to register
        :type prob: alignak.objects.schedulingitem.SchedulingItem
        :param hosts: hosts objects, used to get object in act_depend_of_me
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get object in act_depend_of_me
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param bi_modulations: business impact modulation are used when setting myself as problem
        :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
        :return: list of host/service that are impacts
        :rtype: list[alignak.objects.schedulingitem.SchedulingItem]
        """
        pass

    def unregister_a_problem(self, prob):
        """Remove the problem from our problems list and check if we are still
        'impacted'.

        :param prob: problem to remove
        :type prob: alignak.objects.schedulingitem.SchedulingItem
        :return: None
        """
        pass

    def is_enable_action_dependent(self, hosts, services):
        """Check if dependencies states match dependencies statuses.
        This basically means that a dependency is in a bad state and it can
        explain this object state.

        :param hosts: hosts objects, used to get object in act_depend_of
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get object in act_depend_of
        :type services: alignak.objects.service.Services
        :return: True if all dependencies matches the status, false otherwise
        :rtype: bool
        """
        pass

    def check_and_set_unreachability(self, hosts, services):
        """Check if all dependencies are down, if yes set this object as
        unreachable.

        todo: this function do not care about execution_failure_criteria!

        :param hosts: hosts objects, used to get object in act_depend_of
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to get object in act_depend_of
        :type services: alignak.objects.service.Services
        :return: None
        """
        pass

    def do_i_raise_dependency(self, status, inherit_parents, hosts, services, timeperiods):
        """Check if this object or one of its dependency state (chk dependencies)
        match the status.

        :param status: state list where dependency matters (notification failure criteria)
        :type status: list
        :param inherit_parents: recurse over parents
        :type inherit_parents: bool
        :param hosts: hosts objects, used to raise dependency check
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to raise dependency check
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: True if one state matched the status list, otherwise False
        :rtype: bool
        """
        pass

    def is_no_check_dependent(self, hosts, services, timeperiods):
        """Check if there is some host/service that this object depend on
        has a state in the status list.

        :param hosts: hosts objects, used to raise dependency check
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to raise dependency check
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: True if this object has a check dependency, otherwise False
        :rtype: bool
        """
        pass

    def raise_dependencies_check(self, ref_check, hosts, services, timeperiods,
                                 macromodulations, checkmodulations, checks):
        """Get checks that we depend on if EVERY following conditions is met::

        * timeperiod is valid
        * dep.last_state_update < now - cls.cached_check_horizon (check of dependency is "old")

        :param ref_check: Check we want to get dependency from
        :type ref_check: alignak.check.Check
        :param hosts: hosts objects, used for almost every operation
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used for almost every operation
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :return: check created and check in_checking
        :rtype: dict
        """
        pass

    def schedule(self, hosts, services, timeperiods, macromodulations, checkmodulations,
                 checks, force=False, force_time=None):
        """Main scheduling function.
        If a check is in progress, or active check are disabled, do not schedule
        a check.

        The check interval change with HARD state::

        * SOFT: retry_interval
        * HARD: check_interval

        The first scheduling is evenly distributed, so all checks are not
        launched at the same time.

        :param hosts: hosts objects, used for almost every operation
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used for almost every operation
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :param force: tell if we forced this object to schedule a check
        :type force: bool
        :param force_time: time we would like the check to be scheduled
        :type force_time: None | int
        :return: None
        """
        pass

    def compensate_system_time_change(self, difference):
        """If a system time change occurs we have to update properties time
        related to reflect change.

        :param difference: difference between new time and old time
        :type difference:
        :return: None
        """
        pass

    def disable_active_checks(self, checks):
        """Disable active checks for this host/service.
        Update check in progress with current object information.

        :param checks: Checks object, to change all checks in progress
        :type checks: alignak.objects.check.Checks
        :return: None
        """
        pass

    def remove_in_progress_check(self, check):
        """Remove check from check in progress

        :param check: Check to remove
        :type check: alignak.objects.check.Check
        :return: None
        """
        pass

    def update_in_checking(self):
        """Update in_checking attribute.
        Object is in checking if we have checks in check_in_progress list

        :return: None
        """
        pass

    def remove_in_progress_notification(self, notification):
        """Remove a notification and mark them as zombie

        :param notification: the notification to remove
        :type notification: alignak.notification.Notification
        :return: None
        """
        pass

    def remove_in_progress_notifications(self, master=True):
        """Remove all notifications from notifications_in_progress.
        Preserves some specific notifications (downtime, ...)

        :param master: remove master notifications only if True (default value)
        :type master: bool
        :return: None
        """
        pass

    def get_event_handlers(self, hosts, macromodulations, timeperiods, ext_cmd=False):
        """Raise event handlers if NONE of the following conditions is met::

        * externalcmd is False and event_handlers are disabled (globally or locally)
        * externalcmd is False and object is in scheduled downtime and no event handlers
          in downtime
        * self.event_handler and cls.global_event_handler are None

        :param hosts: hosts objects, used to get data for macros
        :type hosts: alignak.objects.host.Hosts
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used for macros evaluation
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param ext_cmd: tells if this function was called when handling an external_command
        :type ext_cmd: bool
        :return: None
        """
        pass

    def get_snapshot(self, hosts, macromodulations, timeperiods):
        """Raise snapshot event handlers if NONE of the following conditions is met::

        * snapshot_command is None
        * snapshot_enabled is disabled
        * snapshot_criteria does not matches current state
        * last_snapshot > now - snapshot_interval * interval_length (previous snapshot too early)
        * snapshot_period is not valid

        :param hosts: hosts objects, used to get data for macros
        :type hosts: alignak.objects.host.Hosts
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used for snapshot period and macros evaluation
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        """
        pass

    def check_for_flexible_downtime(self, timeperiods, hosts, services):
        """Enter in a downtime if necessary and raise start notification.
        When a non Ok state occurs we try to raise a flexible downtime.

        :param timeperiods: Timeperiods objects, used for downtime period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param hosts: hosts objects, used to enter downtime
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to enter downtime
        :type services: alignak.objects.service.Services
        :return: None
        """
        pass

    def update_hard_unknown_phase_state(self):
        """Update in_hard_unknown_reach_phase attribute and
        was_in_hard_unknown_reach_phase.
        UNKNOWN during a HARD state are not so important, and they should
        not raise notif about it

        :return: None
        """
        pass

    def consume_result(self, chk, notification_period, hosts, services, timeperiods,
                       macromodulations, checkmodulations, bi_modulations,
                       res_modulations, checks, raise_log):
        """Consume a check return and send action in return.
        Main function of reaction of checks, like raise notifications.

        Special cases::

        * is_flapping: immediate notification when a problem raises
        * is_in_scheduled_downtime: no notification
        * is_volatile: immediate notification when a problem raises (service only)

        Basically go through all cases (combination of last_state, current_state,
        attempt number) and do necessary actions (add attempt, raise
        notification, change state type).

        :param chk: check to handle
        :type chk: alignak.objects.check.Check
        :param notification_period: notification period for this host/service
        :type notification_period: alignak.objects.timeperiod.Timeperiod
        :param hosts: hosts objects, used for almost every operation
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used for almost every operation
        :type services: alignak.objects.service.Services
        :param timeperiods: Timeperiods objects, used for all kind of timeperiod (notif, check)
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param checkmodulations: Checkmodulations objects, used to change check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param bi_modulations: business impact modulation are used when setting myself as problem
        :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
        :param res_modulations: result modulation are used to change the output of a check
        :type res_modulations: alignak.object.resultmodulation.Resultmodulations
        :param checks: checks dict, used to get checks_in_progress for the object
        :type checks: dict
        :return: Dependent checks
        :rtype: list[alignak.check.Check]
        """
        pass

    def update_event_and_problem_id(self):
        """Update current_event_id and current_problem_id.
        Those attributes are used for macros (SERVICEPROBLEMID ...)

        :return: None
        """
        pass

    def prepare_notification_for_sending(self, notif, contact, macromodulations,
                                         timeperiods, host_ref):
        """Used by scheduler when a notification is ok to be sent (to
        reactionner). Here we update the command with status of now, and we
        add the contact to set of contact we notified. And we raise the log
        entry.

        :param notif: notification to send
        :type notif: alignak.objects.notification.Notification
        :param contact: contact for this host/service
        :type contact: alignak.object.contact.Contact
        :param macromodulations: Macro modulations objects, used in the notification command
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used to get modulation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param host_ref: reference host (used for a service)
        :type host_ref: alignak.object.host.Host
        :return: None
        """
        pass

    def update_notification_command(self, notif, contact, macromodulations,
                                    timeperiods, host_ref=None):
        """Update the notification command by resolving Macros.
        And because we are just launching the notification, we can say that
        this contact has been notified.

        :param notif: notification to send
        :type notif: alignak.objects.notification.Notification
        :param contact: contact for this host/service
        :type contact: alignak.object.contact.Contact
        :param macromodulations: Macro modulations objects, used in the notification command
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods objects, used to get modulation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param host_ref: reference host (used for a service)
        :type host_ref: alignak.object.host.Host
        :return: None
        """
        pass

    def is_escalable(self, notification, escalations, timeperiods):
        """Check if a notification can be escalated.
        Basically call is_eligible for each escalation.

        :param notification: notification we would like to escalate
        :type notification: alignak.objects.notification.Notification
        :param escalations: Escalations objects, used to get escalation objects (period)
        :type escalations: alignak.objects.escalation.Escalations
        :param timeperiods: Timeperiods objects, used to get escalation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: True if notification can be escalated, otherwise False
        :rtype: bool
        """
        pass

    def get_next_notification_time(self, notif, escalations, timeperiods):
        """Get the next notification time for a notification.
        Take the standard notification_interval or ask for our escalation if
        one of them need a smaller value to escalade.

        :param notif: Notification we need time
        :type notif: alignak.objects.notification.Notification
        :param escalations: Escalations objects, used to get escalation objects (interval, period)
        :type escalations: alignak.objects.escalation.Escalations
        :param timeperiods: Timeperiods objects, used to get escalation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: Timestamp of next notification
        :rtype: int
        """
        pass

    def get_escalable_contacts(self, notification, escalations, timeperiods):
        """Get all contacts (uniq) from eligible escalations

        :param notification: Notification to get data from (notif number...)
        :type notification: alignak.objects.notification.Notification
        :param escalations: Escalations objects, used to get escalation objects (contact, period)
        :type escalations: alignak.objects.escalation.Escalations
        :param timeperiods: Timeperiods objects, used to get escalation period
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: Contact uuid list that can be notified for escalation
        :rtype: list
        """
        pass

    def create_notifications(self, n_type, notification_period, hosts, services,
                             t_wished=None, author_data=None):
        """Create a "master" notification here, which will later (immediately
        before the reactionner gets it) be split up in many "child"
        notifications, one for each contact.

        :param n_type: notification type ("PROBLEM", "RECOVERY" ...)
        :type n_type: str
        :param notification_period: notification period for this host/service
        :type notification_period: alignak.objects.timeperiod.Timeperiod
        :param hosts: hosts objects, used to check if a notif is blocked
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to check if a notif is blocked
        :type services: alignak.objects.service.Services
        :param t_wished: time we want to notify
        :type t_wished: int
        :param author_data: notification author data (eg. for a downtime notification)
        :type author_data: dict (containing author, author_name and a comment)
        :return: None
        """
        pass

    def scatter_notification(self, notif, contacts, notifways, timeperiods,
                             macromodulations, escalations, host_ref):
        """In create_notifications we created a notification master (eg. a
        template). When it's time to hand it over to the reactionner, this
        master notification needs to be split in several child notifications,
        one for each contact.

        To be more exact, one for each contact who is willing to accept
        notifications of this type and at this time.

        :param notif: Notification to scatter
        :type notif: alignak.objects.notification.Notification
        :param contacts: Contacts objects, used to retrieve contact for this object
        :type contacts: alignak.objects.contact.Contacts
        :param notifways: Notificationway objects, used to get notification commands
        :type notifways: alignak.object.notificationway.Notificationways
        :param timeperiods: Timeperiods objects, used to check if notif are allowed at this time
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :param macromodulations: Macro modulations objects, used in the notification command
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param escalations: Escalations objects, used to get escalated contacts
        :type escalations: alignak.objects.escalation.Escalations
        :param host_ref: reference host (used for a service)
        :type host_ref: alignak.object.host.Host
        :return: child notifications
        :rtype: list[alignak.objects.notification.Notification]
        """
        pass

    def launch_check(self, timestamp, hosts, services, timeperiods, macromodulations,
                     checkmodulations, checks, ref_check=None, force=False,
                     dependent=False):
        """Launch a check (command)

        :param timestamp:
        :type timestamp: int
        :param hosts: list of all hosts
        :param services: list of all services
        :param timeperiods: list of all time periods
        :param macromodulations: list of all macro modulations
        :param checkmodulations: Checkmodulations objects, used to change check command if necessary
        :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
        :param checks: list of all current checks
        :param ref_check:
        :type ref_check:
        :param force:
        :type force: bool
        :param dependent:
        :type dependent: bool
        :return: None or alignak.check.Check
        :rtype: None | alignak.check.Check
        """
        pass

    def get_time_to_orphanage(self):
        """Get time to orphanage ::

        * 0 : don't check for orphans
        * non zero : number of secs that can pass before marking the check an orphan.

        :return: integer with the meaning explained above
        :rtype: int
        """
        pass

    def get_perfdata_command(self, hosts, macromodulations, timeperiods):
        """Add event_handler to process performance data if necessary (not disabled)

        :param macromodulations: Macro modulations objects, used in commands (notif, check)
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :return: None
        """
        pass

    def create_business_rules(self, hosts, services, hostgroups, servicegroups,
                              macromodulations, timeperiods, running=False):
        """Create business rules if necessary (cmd contains bp_rule)

        :param hosts: Hosts object to look for objects
        :type hosts: alignak.objects.host.Hosts
        :param services: Services object to look for objects
        :type services: alignak.objects.service.Services
        :param running: flag used in eval_cor_pattern function
        :type running: bool
        :return: None
        """
        pass

    def get_business_rule_output(self, hosts, services, macromodulations, timeperiods):
        """Returns a status string for business rules based items formatted
        using business_rule_output_template attribute as template.

        The template may embed output formatting for itself, and for its child
        (dependent) items. Child format string is expanded into the $( and )$,
        using the string between brackets as format string.

        Any business rule based item or child macro may be used. In addition,
        the $STATUS$, $SHORTSTATUS$ and $FULLNAME$ macro which name is common
        to hosts and services may be used to ease template writing.

        Caution: only children in state not OK are displayed.

        Example:
          A business rule with a format string looking like
              "$STATUS$ [ $($TATUS$: $HOSTNAME$,$SERVICEDESC$ )$ ]"
          Would return
              "CRITICAL [ CRITICAL: host1,srv1 WARNING: host2,srv2  ]"

        :param hosts: Hosts object to look for objects
        :type hosts: alignak.objects.host.Hosts
        :param services: Services object to look for objects
        :type services: alignak.objects.service.Services
        :param macromodulations: Macromodulations object to look for objects
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: Timeperiods object to look for objects
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: status for business rules
        :rtype: str
        """
        pass

    def business_rule_notification_is_blocked(self, hosts, services):
        """Process business rule notifications behaviour. If all problems have
        been acknowledged, no notifications should be sent if state is not OK.
        By default, downtimes are ignored, unless explicitly told to be treated
        as acknowledgements through with the business_rule_downtime_as_ack set.

        :return: True if all source problem are acknowledged, otherwise False
        :rtype: bool
        """
        pass

    def manage_internal_check(self, hosts, services, check, hostgroups, servicegroups,
                              macromodulations, timeperiods):
        """Manage internal commands such as ::

        * bp_rule
        * _internal_host_up
        * _echo

        :param hosts: Used to create business rules
        :type hosts: alignak.objects.host.Hosts
        :param services: Used to create business rules
        :type services: alignak.objects.service.Services
        :param check: internal check to manage
        :type check: alignak.objects.check.Check
        :return: None
        """
        pass

    def fill_data_brok_from(self, data, brok_type):
        """Fill data brok dependent on the brok_type

        :param data: data to fill
        :type data: dict
        :param brok_type: brok type
        :type brok_type: str
        :return: None
        """
        pass

    def acknowledge_problem(self, notification_period, hosts, services, sticky, notify,
                            author, comment, end_time=0):
        """Add an acknowledge

        :param sticky: acknowledge will be always present is host return in UP state
        :type sticky: integer
        :param notify: if to 1, send a notification
        :type notify: integer
        :param author: name of the author or the acknowledge
        :type author: str
        :param comment: comment (description) of the acknowledge
        :type comment: str
        :param end_time: end (timeout) of this acknowledge in seconds(timestamp) (0 to never end)
        :type end_time: int
        :return: None | alignak.comment.Comment
        """
        pass

    def check_for_expire_acknowledge(self):
        """If have acknowledge and is expired, delete it

        :return: None
        """
        pass

    def unacknowledge_problem(self):
        """Remove the acknowledge, reset the flag. The comment is deleted.

        :return: None
        """
        pass

    def unacknowledge_problem_if_not_sticky(self):
        """Remove the acknowledge if it is not sticky

        :return: None
        """
        pass

    def raise_check_result(self):
        """Raise ACTIVE CHECK RESULT entry.
        Function defined in inherited objects (Host and Service)

        :return: None
        """
        pass

    def raise_alert_log_entry(self):
        """Raise ALERT entry.
        Function defined in inherited objects (Host and Service)

        :return: None
        """
        pass

    def raise_acknowledge_log_entry(self):
        """Raise ACKNOWLEDGE STARTED entry.
        Function defined in inherited objects (Host and Service)

        :return: None
        """
        pass

    def raise_unacknowledge_log_entry(self):
        """Raise ACKNOWLEDGE STOPPED entry.
        Function defined in inherited objects (Host and Service)

        :return: None
        """
        pass

    def is_state(self, status):
        """Return if status match the current item status

        :param status: status to compare. Usually comes from config files
        :type status: str
        :return: True
        :rtype: bool
        """
        pass

    def raise_freshness_log_entry(self, t_stale_by):
        """Raise freshness alert entry (warning level).

        Example : "The freshness period of host 'host_name' is expired
        by 0d 0h 17m 6s (threshold=0d 1h 0m 0s).
        Attempt: 1 / 1.
        I'm forcing the state to freshness state (d / HARD)"

        :param t_stale_by: time in seconds the host has been in a stale state
        :type t_stale_by: int
        :return: None
        """
        pass

    def raise_snapshot_log_entry(self, command):
        """Raise item SNAPSHOT entry (critical level).

        Format is : "ITEM SNAPSHOT: *self.get_name()*;*state*;*state_type*;*attempt*;
        *command.get_name()*"
        Example : "HOST SNAPSHOT: server;UP;HARD;1;notify-by-rss"

        :param command: Snapshot command launched
        :type command: alignak.objects.command.Command
        :return: None
        """
        pass

    def raise_flapping_start_log_entry(self, change_ratio, threshold):
        """Raise FLAPPING ALERT START entry (critical level)

        :param change_ratio: percent of changing state
        :type change_ratio: float
        :param threshold: threshold (percent) to trigger this log entry
        :type threshold: float
        :return: None
        """
        pass

    def raise_event_handler_log_entry(self, command):
        """Raise EVENT HANDLER entry (critical level)

        :param command: Handler launched
        :type command: alignak.objects.command.Command
        :return: None
        """
        pass

    def raise_flapping_stop_log_entry(self, change_ratio, threshold):
        """Raise FLAPPING ALERT STOPPED entry (critical level)

        :param change_ratio: percent of changing state
        :type change_ratio: float
        :param threshold: threshold (percent) to trigger this log entry
        :type threshold: float
        :return: None
        """
        pass

    def raise_notification_log_entry(self, notif, contact, host_ref):
        """Raise NOTIFICATION entry (critical level)

        :param notif: notification object created by service alert
        :type notif: alignak.objects.notification.Notification
        :return: None
        """
        pass

    def get_data_for_checks(self, hosts):
        """Get data for a check

        :return: list containing a single host (this one)
        :rtype: list
        """
        pass

    def get_data_for_event_handler(self, hosts):
        """Get data for an event handler

        :return: list containing a single host (this one)
        :rtype: list
        """
        pass

    def get_data_for_notifications(self, contact, notif, host_ref):
        """Get data for a notification

        :param contact: The contact to return
        :type contact:
        :param notif: the notification to return
        :type notif:
        :return: list containing the host and the given parameters
        :rtype: list
        """
        pass

    def set_impact_state(self):
        """We just go an impact, so we go unreachable.
        But only if we enable this state change in the conf

        :return: None
        """
        pass

    def unset_impact_state(self):
        """Unset impact, only if impact state change is set in configuration

        :return: None
        """
        pass

    def set_unreachable(self):
        """Set unreachable: all our parents (dependencies) are not ok.
        Unreachable is different from down/critical

        :return: None
        """
        pass

    def manage_stalking(self, check):
        """Check if the item need stalking or not (immediate recheck)

        :param check: finished check (check.status == 'waitconsume')
        :type check: alignak.check.Check
        :return: None
        """
        pass

    def set_state_from_exit_status(self, status, notif_period, hosts, services):
        """Set the state with the status of a check. Also update last_state

        :param status: integer between 0 and 3
        :type status: int
        :param hosts: hosts objects, used for almost every operation
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects, used for almost every operation
        :type services: alignak.objects.service.Services
        :return: None
        """
        pass

    def is_blocking_notifications(self, notification_period, hosts, services,
                                  n_type, t_wished):
        """Check if a notification is blocked by item

        :param n_type: notification type
        :type n_type:
        :param t_wished: the time we should like to notify the host (mostly now)
        :type t_wished: float
        :return: True if ONE of the above condition was met, otherwise False
        :rtype: bool
        """
        pass

    def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact):
        """Check if the notification is blocked by this contact.

        :param notif: notification created earlier
        :type notif: alignak.notification.Notification
        :param contact: contact we want to notify
        :type contact: alignak.objects.contact.Contact
        :return: True if the notification is blocked, False otherwise
        :rtype: bool
        """
        pass

    def is_correct(self):
        """Check if this object configuration is correct ::

        * Check our own specific properties
        * Call our parent class is_correct checker

        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        """
        pass
85
79
36
4
18
14
5
0.71
1
17
8
2
79
58
79
113
3,279
408
1,696
371
1,595
1,202
1,125
347
1,045
54
3
6
403
3,988
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/schedulingitem.py
alignak.objects.schedulingitem.SchedulingItems
class SchedulingItems(CommandCallItems):
    """Class to handle scheduling items (hosts and services).

    It's mainly used at configuration time to wire dependency relations
    between items and to compile business rules.
    """

    def find_by_filter(self, filters, all_items):
        """Find items matching every filter in `filters`.

        Each item is temporarily exposed in `all_items` under the key
        "service" (when it has a `host` attribute) or "host", so that the
        filter callables can inspect it together with the other monitoring
        objects.

        :param filters: list of filter callables, each taking `all_items`
        :type filters: list
        :param all_items: monitoring items, indexed by type name
        :type all_items: dict
        :return: list of items accepted by all filters
        :rtype: list
        """
        items = []
        for i in self:
            failed = False
            # A service carries a `host` attribute; a host does not.
            if hasattr(i, "host"):
                all_items["service"] = i
            else:
                all_items["host"] = i
            for filt in filters:
                if not filt(all_items):
                    failed = True
                    break
            if failed is False:
                items.append(i)
        return items

    def add_act_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
                           inherits_parents):
        """Add a logical dependency for actions between two hosts or services.

        :param son_id: uuid of son host/service
        :type son_id: str
        :param parent_id: uuid of parent host/service
        :type parent_id: str
        :param notif_failure_criteria: notification failure criteria,
               notification for a dependent host may vary
        :type notif_failure_criteria: list
        :param dep_period: dependency period. Timeperiod for dependency may vary
        :type dep_period: str | None
        :param inherits_parents: if this dep will inherit from parents (timeperiod, status)
        :type inherits_parents: bool
        :return: None
        """
        # Bug fix: the original logged the error for an unknown son but then
        # fell through and used the unbound name `son`, raising NameError.
        # Bail out explicitly on either unknown end of the dependency.
        if son_id not in self:
            self.add_error("Dependent son (%s) is unknown, configuration error!" % son_id)
            return
        if parent_id not in self:
            self.add_error("Parent (%s) is unknown, configuration error!" % parent_id)
            return
        son = self[son_id]
        parent = self[parent_id]

        son.act_depend_of.append((parent_id, notif_failure_criteria, dep_period,
                                  inherits_parents))
        parent.act_depend_of_me.append((son_id, notif_failure_criteria, dep_period,
                                        inherits_parents))

        # Parent / children relations
        if parent_id not in son.parent_dependencies:
            son.parent_dependencies.append(parent_id)
        if son_id not in parent.child_dependencies:
            parent.child_dependencies.append(son_id)

    def del_act_dependency(self, son_id, parent_id):  # pragma: no cover, not yet tested
        """Remove act_dependency between two hosts or services.

        TODO: do we really intend to remove dynamically ?

        :param son_id: uuid of son host/service
        :type son_id: str
        :param parent_id: uuid of parent host/service
        :type parent_id: str
        :return: None
        """
        son = self[son_id]
        parent = self[parent_id]
        to_del = []
        # First we remove in my list
        for (host, status, timeperiod, inherits_parent) in son.act_depend_of:
            if host == parent_id:
                to_del.append((host, status, timeperiod, inherits_parent))
        for tup in to_del:
            son.act_depend_of.remove(tup)

        # And now in the father part
        to_del = []
        for (host, status, timeperiod, inherits_parent) in parent.act_depend_of_me:
            if host == son_id:
                to_del.append((host, status, timeperiod, inherits_parent))
        for tup in to_del:
            parent.act_depend_of_me.remove(tup)

        # Remove in child/parents dependencies too
        # Bug fix: Python lists have no `delete` method; the original
        # `.delete(...)` calls would raise AttributeError. Use a guarded
        # `remove` instead (the id may legitimately be absent).
        # Me in father list
        if son_id in parent.child_dependencies:
            parent.child_dependencies.remove(son_id)
        # and father list in mine
        if parent_id in son.parent_dependencies:
            son.parent_dependencies.remove(parent_id)

    def add_chk_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
                           inherits_parents):
        """Add a logical dependency for checks between two hosts or services.

        :param son_id: uuid of son host/service
        :type son_id: str
        :param parent_id: uuid of parent host/service
        :type parent_id: str
        :param notif_failure_criteria: notification failure criteria,
               notification for a dependent host may vary
        :type notif_failure_criteria: list
        :param dep_period: dependency period. Timeperiod for dependency may vary
        :type dep_period: str
        :param inherits_parents: if this dep will inherit from parents (timeperiod, status)
        :type inherits_parents: bool
        :return: None
        """
        son = self[son_id]
        parent = self[parent_id]
        son.chk_depend_of.append((parent_id, notif_failure_criteria, 'logic_dep',
                                  dep_period, inherits_parents))
        parent.chk_depend_of_me.append((son_id, notif_failure_criteria, 'logic_dep',
                                        dep_period, inherits_parents))

        # Parent / children relations
        if parent_id not in son.parent_dependencies:
            son.parent_dependencies.append(parent_id)
        if son_id not in parent.child_dependencies:
            parent.child_dependencies.append(son_id)

    def create_business_rules(self, hosts, services, hostgroups, servicegroups,
                              macromodulations, timeperiods):
        """Loop on hosts or services and call SchedulingItem.create_business_rules

        :param hosts: hosts to link to
        :type hosts: alignak.objects.host.Hosts
        :param services: services to link to
        :type services: alignak.objects.service.Services
        :param hostgroups: hostgroups to link to
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :param servicegroups: servicegroups to link to
        :type servicegroups: alignak.objects.servicegroup.Servicegroups
        :param macromodulations: macromodulations to link to
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: timeperiods to link to
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        """
        for item in self:
            item.create_business_rules(hosts, services, hostgroups, servicegroups,
                                       macromodulations, timeperiods)
class SchedulingItems(CommandCallItems):
    """Container class for scheduling items (hosts / services).

    Mostly exercised at configuration time: dependency wiring and
    business-rules compilation.
    """

    def find_by_filter(self, filters, all_items):
        """Return the items that satisfy every filter.

        :param filters: list of filter callables
        :type filters: list
        :param all_items: monitoring items, indexed by type name
        :type all_items: dict
        :return: matching items
        :rtype: list
        """
        pass

    def add_act_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
                           inherits_parents):
        """Register an action (notification) dependency between two items.

        :param son_id: uuid of the dependent (son) host/service
        :type son_id: str
        :param parent_id: uuid of the parent host/service
        :type parent_id: str
        :param notif_failure_criteria: failure criteria for which notifications
               of the dependent item may vary
        :type notif_failure_criteria: list
        :param dep_period: timeperiod during which the dependency applies
        :type dep_period: str | None
        :param inherits_parents: whether this dependency inherits from parents
               (timeperiod, status)
        :type inherits_parents: bool
        :return: None
        """
        pass

    def del_act_dependency(self, son_id, parent_id):
        """Drop an action dependency between two items.

        TODO: do we really intend to remove dynamically ?

        :param son_id: uuid of the dependent (son) host/service
        :type son_id: str
        :param parent_id: uuid of the parent host/service
        :type parent_id: str
        :return: None
        """
        pass

    def add_chk_dependency(self, son_id, parent_id, notif_failure_criteria, dep_period,
                           inherits_parents):
        """Register a check dependency between two items.

        :param son_id: uuid of the dependent (son) host/service
        :type son_id: str
        :param parent_id: uuid of the parent host/service
        :type parent_id: str
        :param notif_failure_criteria: failure criteria for which notifications
               of the dependent item may vary
        :type notif_failure_criteria: list
        :param dep_period: timeperiod during which the dependency applies
        :type dep_period: str
        :param inherits_parents: whether this dependency inherits from parents
               (timeperiod, status)
        :type inherits_parents: bool
        :return: None
        """
        pass

    def create_business_rules(self, hosts, services, hostgroups, servicegroups,
                              macromodulations, timeperiods):
        """Delegate business-rules creation to every contained item.

        :param hosts: hosts to link to
        :type hosts: alignak.objects.host.Hosts
        :param services: services to link to
        :type services: alignak.objects.service.Services
        :param hostgroups: hostgroups to link to
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :param servicegroups: servicegroups to link to
        :type servicegroups: alignak.objects.servicegroup.Servicegroups
        :param macromodulations: macromodulations to link to
        :type macromodulations: alignak.objects.macromodulation.Macromodulations
        :param timeperiods: timeperiods to link to
        :type timeperiods: alignak.objects.timeperiod.Timeperiods
        :return: None
        """
        pass
6
6
29
2
13
14
4
1.14
1
0
0
2
5
0
5
52
152
16
64
23
55
73
55
20
49
7
3
3
22
3,989
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/service.py
alignak.objects.service.Service
class Service(SchedulingItem): """Service class implements monitoring concepts for service. For example it defines parents, check_interval, check_command etc. """ # AutoSlots create the __slots__ with properties and # running_properties names __metaclass__ = AutoSlots # The host and service do not have the same 0 value, now yes :) ok_up = u'OK' # used by item class for format specific value like for Broks my_type = 'service' my_name_property = "service_description" my_index_property = "host_service" # properties defined by configuration # required: is required in conf # default: default value if no set in conf # pythonize: function to call when transforming string to python object # fill_brok: if set, send to broker. there are two categories: # full_status for initial and update status, check_result for check results # no_slots: do not take this property for __slots__ properties = SchedulingItem.properties.copy() properties.update({ 'alias': StringProp(default=u'', fill_brok=[FULL_STATUS]), 'host_name': StringProp(fill_brok=[FULL_STATUS, CHECK_RESULT, 'next_schedule'], special=True), 'hostgroup_name': StringProp(default='', fill_brok=[FULL_STATUS], merging='join', special=True), 'service_description': StringProp(fill_brok=[FULL_STATUS, CHECK_RESULT, 'next_schedule']), 'servicegroups': ListProp(default=[], fill_brok=[FULL_STATUS], merging='join'), 'is_volatile': BoolProp(default=False, fill_brok=[FULL_STATUS]), 'check_command': StringProp(fill_brok=[FULL_STATUS]), 'flap_detection_options': ListProp(default=['o', 'w', 'c', 'u', 'x'], fill_brok=[FULL_STATUS], split_on_comma=True), 'notification_options': ListProp(default=['w', 'u', 'c', 'r', 'f', 's', 'x'], fill_brok=[FULL_STATUS], split_on_comma=True), 'parallelize_check': BoolProp(default=True, fill_brok=[FULL_STATUS]), 'merge_host_contacts': BoolProp(default=False, fill_brok=[FULL_STATUS]), 'host_dependency_enabled': BoolProp(default=True, fill_brok=[FULL_STATUS]), 'freshness_state': CharProp(default='x', 
fill_brok=[FULL_STATUS]), # Easy Service dep definition 'service_dependencies': ListProp(default=[], merging='join', split_on_comma=True, keep_empty=True), # service generator 'duplicate_foreach': StringProp(default=''), 'default_value': StringProp(default=''), # UI aggregation 'aggregation': StringProp(default='', fill_brok=[FULL_STATUS]), 'snapshot_criteria': ListProp(default=['w', 'c', 'u', 'x'], fill_brok=[FULL_STATUS], merging='join'), }) # properties used in the running state running_properties = SchedulingItem.running_properties.copy() running_properties.update({ 'state': StringProp(default=u'OK', fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_time_ok': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_time_warning': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_time_critical': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_time_unknown': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'last_time_unreachable': IntegerProp(default=0, fill_brok=[FULL_STATUS, CHECK_RESULT], retention=True), 'host': StringProp(default=None), 'state_before_hard_unknown_reach_phase': StringProp(default=u'OK', retention=True), }) special_properties = ( 'service_description' ) # Mapping between Macros and properties (can be prop or a function) macros = SchedulingItem.macros.copy() macros.update({ 'SERVICEDESC': 'service_description', 'SERVICEDISPLAYNAME': 'display_name', 'SERVICESTATE': 'state', 'SERVICESTATEID': 'state_id', 'LASTSERVICESTATE': 'last_state', 'LASTSERVICESTATEID': 'last_state_id', 'SERVICESTATETYPE': 'state_type', 'SERVICEATTEMPT': 'attempt', 'MAXSERVICEATTEMPTS': 'max_check_attempts', 'SERVICEISVOLATILE': 'is_volatile', 'SERVICEEVENTID': 'current_event_id', 'LASTSERVICEEVENTID': 'last_event_id', 'SERVICEPROBLEMID': 'current_problem_id', 'LASTSERVICEPROBLEMID': 'last_problem_id', 'SERVICELATENCY': 'latency', 
'SERVICEEXECUTIONTIME': 'execution_time', 'SERVICEDURATION': 'get_duration', 'SERVICEDURATIONSEC': 'get_duration_sec', 'SERVICEDOWNTIME': 'get_downtime', 'SERVICEPERCENTCHANGE': 'percent_state_change', 'SERVICEGROUPNAME': ('get_groupname', ['servicegroups']), 'SERVICEGROUPNAMES': ('get_groupnames', ['servicegroups']), 'LASTSERVICECHECK': 'last_chk', 'LASTSERVICESTATECHANGE': 'last_state_change', 'LASTSERVICEOK': 'last_time_ok', 'LASTSERVICEWARNING': 'last_time_warning', 'LASTSERVICEUNKNOWN': 'last_time_unknown', 'LASTSERVICEUNREACHABLE': 'last_time_unreachable', 'LASTSERVICECRITICAL': 'last_time_critical', 'SERVICEOUTPUT': 'output', 'LONGSERVICEOUTPUT': 'long_output', 'SERVICEPERFDATA': 'perf_data', 'LASTSERVICEPERFDATA': 'last_perf_data', 'SERVICECHECKCOMMAND': 'get_check_command', 'SERVICESNAPSHOTCOMMAND': 'get_snapshot_command', 'SERVICEACKAUTHOR': 'get_ack_author_name', 'SERVICEACKAUTHORNAME': 'get_ack_author_name', 'SERVICEACKAUTHORALIAS': 'get_ack_author_name', 'SERVICEACKCOMMENT': 'get_ack_comment', 'SERVICEACTIONURL': 'action_url', 'SERVICENOTESURL': 'notes_url', 'SERVICENOTES': 'notes', 'SERVICEBUSINESSIMPACT': 'business_impact', }) # This tab is used to transform old parameters name into new ones # so from Nagios2 format, to Nagios3 ones. # Or Alignak deprecated names like criticity old_properties = SchedulingItem.old_properties.copy() old_properties.update({ 'hostgroup': 'hostgroup_name', 'hostgroups': 'hostgroup_name', }) def __str__(self): # pragma: no cover return '<Service%s %s, uuid=%s, %s (%s), use: %s />' \ % (' template' if self.is_a_template() else '', self.get_full_name(), getattr(self, 'uuid', 'n/a'), getattr(self, 'state', 'n/a'), getattr(self, 'state_type', 'n/a'), getattr(self, 'tags', None)) __repr__ = __str__ @property def realm(self): """Get the service realm... 
indeed it is the service's host one!""" if not getattr(self, 'host', None): return None return self.host.realm @property def overall_state_id(self): """Get the service overall state. The service overall state identifier is the service status including: - the monitored state - the acknowledged state - the downtime state The overall state is (prioritized): - a service is not monitored (5) - a service critical or unreachable (4) - a service warning or unknown (3) - a service downtimed (2) - a service acknowledged (1) - a service ok (0) *Note* that services in unknown state are considered as warning, and unreachable ones are considered as critical! Also note that the service state is considered only for HARD state type! """ overall_state = 0 if not self.monitored: overall_state = 5 elif self.acknowledged: overall_state = 1 elif self.downtimed: overall_state = 2 elif self.state_type == 'HARD': if self.state == 'WARNING': overall_state = 3 elif self.state == 'CRITICAL': overall_state = 4 elif self.state == 'UNKNOWN': overall_state = 3 elif self.state == 'UNREACHABLE': overall_state = 4 return overall_state ####### # __ _ _ _ # / _(_) | | (_) # ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __ # / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \ # | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | | # \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_| # __/ | # |___/ ###### def fill_predictive_missing_parameters(self): """define state with initial_state :return: None """ if self.initial_state == 'w': self.state = u'WARNING' elif self.initial_state == 'u': self.state = u'UNKNOWN' elif self.initial_state == 'c': self.state = u'CRITICAL' elif self.initial_state == 'x': self.state = u'UNREACHABLE' @property def host_service(self): # actually only used for (un)indexitem() via name_property.. 
"""Unique key for a service :return: Tuple with host_name and service_description :rtype: tuple """ return self.get_host_name(), self.get_name() @property def display_name(self): """Display_name if defined, else service_description :return: service description or service display_name :rtype: str """ return getattr(self, '_display_name', self.get_name()) @display_name.setter def display_name(self, display_name): """Setter for display_name attribute :param display_name: value to set :return: None """ self._display_name = display_name def get_host_name(self): """Returns the service host name :return: service' host name :rtype: str """ host_name = getattr(self, 'host_name', 'unhosted') if not host_name: return 'unhosted' return host_name def get_full_name(self): """Get the full name for debugging (host_name/service_description) :return: service full name :rtype: str """ if self.get_host_name() and hasattr(self, 'service_description'): return "%s/%s" % (self.get_host_name(), self.service_description) return self.get_name() def get_servicegroups(self): """Accessor to servicegroups attribute :return: servicegroup list object of host :rtype: list """ return self.servicegroups def get_groupnames(self, sgs): """Get servicegroups list :return: comma separated list of servicegroups :rtype: str """ return ','.join([sgs[sg].get_name() for sg in self.servicegroups]) def get_hostgroups(self, hosts): """Wrapper to access hostgroups attribute of host attribute :return: service hostgroups (host one) :rtype: alignak.objects.hostgroup.Hostgroups """ return hosts[self.host].hostgroups def get_host_tags(self, hosts): """Wrapper to access tags attribute of host attribute :return: service tags (host one) :rtype: alignak.objects.tag.Tags """ return hosts[self.host].tags def get_service_tags(self): """Accessor to tags attribute :return: service tags :rtype: alignak.objects.tag.Tags """ return self.tags def is_correct(self): """Check if this object configuration is correct :: * Check our own 
specific properties * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False :rtype: bool """ state = True cls = self.__class__ hname = getattr(self, 'host_name', '') hgname = getattr(self, 'hostgroup_name', '') sdesc = getattr(self, 'service_description', '') if not sdesc: self.add_error("a %s has been defined without service_description, from: %s" % (self.my_type, self.imported_from)) elif not hname and not hgname: self.add_error("a %s has been defined without host_name nor " "hostgroup_name, from: %s" % (self.my_type, self.imported_from)) elif not hname: self.add_error("not bound to any host.") elif self.host is None: self.add_error("unknown host_name '%s'" % self.host_name) # Set display_name if needed if not getattr(self, 'display_name', ''): self.display_name = "%s/%s" % (hname, sdesc) for char in cls.illegal_object_name_chars: if char not in self.service_description: continue self.add_error("service_description got an illegal character: %s" % char) return super(Service, self).is_correct() and state def duplicate(self, host): # pylint: disable=too-many-locals """For a given host, look for all copy we must create for for_each property :param host: alignak host object :type host: alignak.objects.host.Host :return: list :rtype: list """ duplicates = [] # In macro, it's all in UPPER case prop = self.duplicate_foreach.strip().upper() if prop not in host.customs: # If I do not have the property, we bail out return duplicates # Get the list entry, and the not one if there is one entry = host.customs[prop] # Look at the list of the key we do NOT want maybe, # for _disks it will be _!disks not_entry = host.customs.get('_' + '!' 
+ prop[1:], '').split(',') not_keys = strip_and_uniq(not_entry) default_value = getattr(self, 'default_value', '') # Transform the generator string to a list # Missing values are filled with the default value try: key_values = tuple(generate_key_value_sequences(entry, default_value)) except KeyValueSyntaxError as exc: fmt_dict = { 'prop': self.duplicate_foreach, 'host': host.get_name(), 'svc': self.service_description, 'entry': entry, 'exc': exc, } err = ( "The custom property %(prop)r of the " "host %(host)r is not a valid entry for a service generator: %(exc)s, " "with entry=%(entry)r") % fmt_dict logger.warning(err) host.add_error(err) return duplicates for key_value in key_values: key = key_value['KEY'] # Maybe this key is in the NOT list, if so, skip it if key in not_keys: continue new_s = self.copy() new_s.host_name = host.get_name() if self.is_a_template(): # if template, the new one is not new_s.register = 1 for key in key_value: if key == 'KEY': if hasattr(self, 'service_description'): # We want to change all illegal chars to a _ sign. # We can't use class.illegal_obj_char # because in the "explode" phase, we do not have access to this data! :( safe_key_value = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_', key_value[key]) new_s.service_description = self.service_description.replace( '$' + key + '$', safe_key_value ) # Here is a list of property where we will expand the $KEY$ by the value _the_expandables = ['check_command', 'aggregation', 'event_handler'] for prop in _the_expandables: if hasattr(self, prop): # here we can replace VALUE, VALUE1, VALUE2,... 
setattr(new_s, prop, getattr(new_s, prop).replace('$' + key + '$', key_value[key])) if hasattr(self, 'service_dependencies'): for i, servicedep in enumerate(new_s.service_dependencies): new_s.service_dependencies[i] = servicedep.replace( '$' + key + '$', key_value[key] ) # And then add in our list this new service duplicates.append(new_s) return duplicates ##### # _ # (_) # _ __ _ _ _ __ _ __ _ _ __ __ _ # | '__| | | | '_ \| '_ \| | '_ \ / _` | # | | | |_| | | | | | | | | | | | (_| | # |_| \__,_|_| |_|_| |_|_|_| |_|\__, | # __/ | # |___/ #### def set_state_from_exit_status(self, status, notif_period, hosts, services): """Set the state in UP, WARNING, CRITICAL, UNKNOWN or UNREACHABLE according to the status of a check result. :param status: integer between 0 and 4 :type status: int :return: None """ now = time.time() # we should put in last_state the good last state: # if not just change the state by an problem/impact # we can take current state. But if it's the case, the # real old state is self.state_before_impact (it's the TRUE # state in fact) # but only if the global conf have enable the impact state change cls = self.__class__ if cls.enable_problem_impacts_states_change \ and self.is_impact \ and not self.state_changed_since_impact: self.last_state = self.state_before_impact else: # standard case self.last_state = self.state # The last times are kept as integer values rather than float... no need for ms! 
if status == 0: self.state = u'OK' self.state_id = 0 self.last_time_ok = int(self.last_state_update) # self.last_time_ok = self.last_state_update state_code = 'o' elif status == 1: self.state = u'WARNING' self.state_id = 1 self.last_time_warning = int(self.last_state_update) # self.last_time_warning = self.last_state_update state_code = 'w' elif status == 2: self.state = u'CRITICAL' self.state_id = 2 self.last_time_critical = int(self.last_state_update) # self.last_time_critical = self.last_state_update state_code = 'c' elif status == 3: self.state = u'UNKNOWN' self.state_id = 3 self.last_time_unknown = int(self.last_state_update) # self.last_time_unknown = self.last_state_update state_code = 'u' elif status == 4: self.state = u'UNREACHABLE' self.state_id = 4 self.last_time_unreachable = int(self.last_state_update) # self.last_time_unreachable = self.last_state_update state_code = 'x' else: self.state = u'CRITICAL' # exit code UNDETERMINED self.state_id = 2 self.last_time_critical = int(self.last_state_update) # self.last_time_critical = self.last_state_update state_code = 'c' if state_code in self.flap_detection_options: self.add_flapping_change(self.state != self.last_state) # Now we add a value, we update the is_flapping prop self.update_flapping(notif_period, hosts, services) if self.state != self.last_state: self.last_state_change = self.last_state_update self.duration_sec = now - self.last_state_change def is_state(self, status): # pylint: disable=too-many-return-statements """Return True if status match the current service status :param status: status to compare ( "o", "c", "w", "u", "x"). 
Usually comes from config files :type status: str :return: True if status <=> self.status, otherwise False :rtype: bool """ if status == self.state: return True # Now low status if status == 'o' and self.state == u'OK': return True if status == 'c' and self.state == u'CRITICAL': return True if status == 'w' and self.state == u'WARNING': return True if status == 'u' and self.state == u'UNKNOWN': return True if status == 'x' and self.state == u'UNREACHABLE': return True return False def last_time_non_ok_or_up(self): """Get the last time the service was in a non-OK state :return: the nearest last time the service was not ok :rtype: int """ non_ok_times = [x for x in [self.last_time_warning, self.last_time_critical, self.last_time_unknown] if x > self.last_time_ok] if not non_ok_times: last_time_non_ok = 0 # todo: program_start would be better? else: last_time_non_ok = min(non_ok_times) return last_time_non_ok def raise_check_result(self): """Raise ACTIVE CHECK RESULT entry Example : "ACTIVE SERVICE CHECK: server;DOWN;HARD;1;I don't know what to say..." :return: None """ if not self.__class__.log_active_checks: return log_level = 'info' if self.state in [u'WARNING', u'UNREACHABLE']: log_level = 'warning' elif self.state == u'CRITICAL': log_level = 'error' brok = make_monitoring_log( log_level, 'ACTIVE SERVICE CHECK: %s;%s;%s;%d;%s' % (self.host_name, self.get_name(), self.state, self.attempt, self.output) ) self.broks.append(brok) def raise_alert_log_entry(self): """Raise SERVICE ALERT entry Format is : "SERVICE ALERT: *host.get_name()*;*get_name()*;*state*;*state_type*;*attempt* ;*output*" Example : "SERVICE ALERT: server;Load;DOWN;HARD;1;I don't know what to say..." 
:return: None """ if self.__class__.log_alerts: log_level = 'info' if self.state == 'WARNING': log_level = 'warning' if self.state == 'CRITICAL': log_level = 'error' brok = make_monitoring_log( log_level, 'SERVICE ALERT: %s;%s;%s;%s;%d;%s' % ( self.host_name, self.get_name(), self.state, self.state_type, self.attempt, self.output ) ) self.broks.append(brok) if 'ALIGNAK_LOG_ALERTS' in os.environ: if os.environ['ALIGNAK_LOG_ALERTS'] == 'WARNING': logger.warning('SERVICE ALERT: %s;%s;%s;%s;%d;%s', self.host_name, self.get_name(), self.state, self.state_type, self.attempt, self.output) else: logger.info('SERVICE ALERT: %s;%s;%s;%s;%d;%s', self.host_name, self.get_name(), self.state, self.state_type, self.attempt, self.output) def raise_initial_state(self): """Raise SERVICE HOST ALERT entry (info level) Format is : "SERVICE HOST STATE: *host.get_name()*;*get_name()*;*state*;*state_type* ;*attempt*;*output*" Example : "SERVICE HOST STATE: server;Load;DOWN;HARD;1;I don't know what to say..." :return: None """ if not self.__class__.log_initial_states: return log_level = 'info' if self.state in ['WARNING', 'UNREACHABLE']: log_level = 'warning' if self.state in ['CRITICAL', 'UNKNOWN']: log_level = 'error' brok = make_monitoring_log( log_level, 'CURRENT SERVICE STATE: %s;%s;%s;%s;%d;%s' % ( self.host_name, self.get_name(), self.state, self.state_type, self.attempt, self.output ) ) self.broks.append(brok) def raise_notification_log_entry(self, notif, contact, host_ref): """Raise SERVICE NOTIFICATION entry (critical level) Format is : "SERVICE NOTIFICATION: *contact.get_name()*;*host_name*;*self.get_name()* ;*state*;*command.get_name()*;*output*" Example : "SERVICE NOTIFICATION: superadmin;server;Load;UP;notify-by-rss;no output" :param notif: notification object created by service alert :type notif: alignak.objects.notification.Notification :return: None """ if self.__class__.log_notifications: log_level = 'info' command = notif.command_call if notif.type in [u'DOWNTIMESTART', 
u'DOWNTIMEEND', u'DOWNTIMECANCELLED', u'CUSTOM', u'ACKNOWLEDGEMENT', u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED']: state = '%s (%s)' % (notif.type, self.state) else: state = self.state if self.state == 'WARNING': log_level = 'warning' if self.state == 'CRITICAL': log_level = 'error' brok = make_monitoring_log( log_level, "SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s;%s" % ( contact.get_name(), host_ref.get_name(), self.get_name(), state, notif.notif_nb, command.get_name(), self.output ) ) self.broks.append(brok) if 'ALIGNAK_LOG_NOTIFICATIONS' in os.environ: if os.environ['ALIGNAK_LOG_NOTIFICATIONS'] == 'WARNING': logger.warning("SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s;%s", contact.get_name(), host_ref.get_name(), self.get_name(), state, notif.notif_nb, command.get_name(), self.output) else: logger.info("SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s;%s", contact.get_name(), host_ref.get_name(), self.get_name(), state, notif.notif_nb, command.get_name(), self.output) def raise_event_handler_log_entry(self, command): """Raise SERVICE EVENT HANDLER entry (critical level) Format is : "SERVICE EVENT HANDLER: *host_name*;*self.get_name()*;*state*;*state_type* ;*attempt*;*command.get_name()*" Example : "SERVICE EVENT HANDLER: server;Load;UP;HARD;1;notify-by-rss" :param command: Handler launched :type command: alignak.objects.command.Command :return: None """ if not self.__class__.log_event_handlers: return log_level = 'info' if self.state == 'WARNING': log_level = 'warning' if self.state == 'CRITICAL': log_level = 'error' brok = make_monitoring_log( log_level, "SERVICE EVENT HANDLER: %s;%s;%s;%s;%s;%s" % ( self.host_name, self.get_name(), self.state, self.state_type, self.attempt, command.get_name() ) ) self.broks.append(brok) def raise_snapshot_log_entry(self, command): """Raise SERVICE SNAPSHOT entry (critical level) Format is : "SERVICE SNAPSHOT: *host_name*;*self.get_name()*;*state*;*state_type*; *attempt*;*command.get_name()*" Example : "SERVICE SNAPSHOT: 
server;Load;UP;HARD;1;notify-by-rss" :param command: Snapshot command launched :type command: alignak.objects.command.Command :return: None """ if not self.__class__.log_snapshots: return log_level = 'info' if self.state == 'WARNING': log_level = 'warning' if self.state == 'CRITICAL': log_level = 'error' brok = make_monitoring_log( log_level, "SERVICE SNAPSHOT: %s;%s;%s;%s;%s;%s" % ( self.host_name, self.get_name(), self.state, self.state_type, self.attempt, command.get_name() ) ) self.broks.append(brok) def raise_flapping_start_log_entry(self, change_ratio, threshold): """Raise SERVICE FLAPPING ALERT START entry (critical level) Format is : "SERVICE FLAPPING ALERT: *host_name*;*self.get_name()*;STARTED; Service appears to have started flapping (*change_ratio*% change >= *threshold*% threshold)" Example : "SERVICE FLAPPING ALERT: server;Load;STARTED; Service appears to have started flapping (50.6% change >= 50.0% threshold)" :param change_ratio: percent of changing state :param threshold: threshold (percent) to trigger this log entry :return: None """ if not self.__class__.log_flappings: return brok = make_monitoring_log( 'info', "SERVICE FLAPPING ALERT: %s;%s;STARTED; Service appears to have " "started flapping (%.1f%% change >= %.1f%% threshold)" % (self.host_name, self.get_name(), change_ratio, threshold) ) self.broks.append(brok) def raise_flapping_stop_log_entry(self, change_ratio, threshold): """Raise SERVICE FLAPPING ALERT STOPPED entry (critical level) Format is : "SERVICE FLAPPING ALERT: *host_name*;*self.get_name()*;STOPPED; Service appears to have started flapping (*change_ratio*% change >= *threshold*% threshold)" Example : "SERVICE FLAPPING ALERT: server;Load;STOPPED; Service appears to have started flapping (50.6% change >= 50.0% threshold)" :param change_ratio: percent of changing state :type change_ratio: float :param threshold: threshold (percent) to trigger this log entry :type threshold: float :return: None """ if not 
self.__class__.log_flappings: return brok = make_monitoring_log( 'info', "SERVICE FLAPPING ALERT: %s;%s;STOPPED; Service appears to have " "stopped flapping (%.1f%% change < %.1f%% threshold)" % (self.host_name, self.get_name(), change_ratio, threshold) ) self.broks.append(brok) def raise_no_next_check_log_entry(self): """Raise no scheduled check entry (warning level) Format is : "I cannot schedule the check for the service '*get_name()*' on host '*host_name*' because there is not future valid time" Example : "I cannot schedule the check for the service 'Load' on host 'Server' because there is not future valid time" :return: None """ logger.warning("I cannot schedule the check for the service '%s' on " "host '%s' because there is not future valid time", self.get_name(), self.host_name) def raise_acknowledge_log_entry(self): """Raise SERVICE ACKNOWLEDGE STARTED entry (critical level) :return: None """ if not self.__class__.log_acknowledgements: return brok = make_monitoring_log( 'info', "SERVICE ACKNOWLEDGE ALERT: %s;%s;STARTED; Service problem has been acknowledged" % (self.host_name, self.get_name()) ) self.broks.append(brok) def raise_unacknowledge_log_entry(self): """Raise SERVICE ACKNOWLEDGE STOPPED entry (critical level) :return: None """ if not self.__class__.log_acknowledgements: return brok = make_monitoring_log( 'info', "SERVICE ACKNOWLEDGE ALERT: %s;%s;EXPIRED; Service problem acknowledge expired" % (self.host_name, self.get_name()) ) self.broks.append(brok) def raise_enter_downtime_log_entry(self): """Raise SERVICE DOWNTIME ALERT entry (critical level) Format is : "SERVICE DOWNTIME ALERT: *host_name*;*get_name()*;STARTED; Service has entered a period of scheduled downtime" Example : "SERVICE DOWNTIME ALERT: test_host_0;Load;STARTED; Service has entered a period of scheduled downtime" :return: None """ if not self.__class__.log_downtimes: return brok = make_monitoring_log( 'info', "SERVICE DOWNTIME ALERT: %s;%s;STARTED; " "Service has entered a period of 
scheduled downtime" % (self.host_name, self.get_name()) ) self.broks.append(brok) def raise_exit_downtime_log_entry(self): """Raise SERVICE DOWNTIME ALERT entry (critical level) Format is : "SERVICE DOWNTIME ALERT: *host_name*;*get_name()*;STOPPED; Service has entered a period of scheduled downtime" Example : "SERVICE DOWNTIME ALERT: test_host_0;Load;STOPPED; Service has entered a period of scheduled downtime" :return: None """ if not self.__class__.log_downtimes: return brok = make_monitoring_log( 'info', "SERVICE DOWNTIME ALERT: %s;%s;STOPPED; Service " "has exited from a period of scheduled downtime" % (self.host_name, self.get_name()) ) self.broks.append(brok) def raise_cancel_downtime_log_entry(self): """Raise SERVICE DOWNTIME ALERT entry (critical level) Format is : "SERVICE DOWNTIME ALERT: *host_name*;*get_name()*;CANCELLED; Service has entered a period of scheduled downtime" Example : "SERVICE DOWNTIME ALERT: test_host_0;Load;CANCELLED; Service has entered a period of scheduled downtime" :return: None """ if not self.__class__.log_downtimes: return brok = make_monitoring_log( 'info', "SERVICE DOWNTIME ALERT: %s;%s;CANCELLED; " "Scheduled downtime for service has been cancelled." % (self.host_name, self.get_name()) ) self.broks.append(brok) def manage_stalking(self, check): """Check if the service need stalking or not (immediate recheck) If one stalking_options matches the exit_status ('o' <=> 0 ...) 
then stalk is needed Raise a log entry (info level) if stalk is needed :param check: finished check (check.status == 'waitconsume') :type check: alignak.check.Check :return: None """ need_stalk = False if check.status == u'waitconsume': if check.exit_status == 0 and 'o' in self.stalking_options: need_stalk = True elif check.exit_status == 1 and 'w' in self.stalking_options: need_stalk = True elif check.exit_status == 2 and 'check' in self.stalking_options: need_stalk = True elif check.exit_status == 3 and 'u' in self.stalking_options: need_stalk = True if check.output == self.output: need_stalk = False if need_stalk: logger.info("Stalking %s: %s", self.get_name(), check.output) def get_data_for_checks(self, hosts): """Get data for a check :return: list containing the service and the linked host :rtype: list """ return [hosts[self.host], self] def get_data_for_event_handler(self, hosts): """Get data for an event handler :return: list containing the service and the linked host :rtype: list """ return [hosts[self.host], self] def get_data_for_notifications(self, contact, notif, host_ref): """Get data for a notification :param contact: The contact to return :type contact: :param notif: the notification to return :type notif: :return: list containing the service, the host and the given parameters :rtype: list """ if not host_ref: return [self, contact, notif] return [host_ref, self, contact, notif] def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact): """Check if the notification is blocked by this contact. 
:param notifways: concerned notification ways :type notifways: alignak.objects.notificationway.NotificationWays :param timeperiods: concerned timeperiods :type timeperiods: alignak.objects.timeperiod.Timeperiods :param notif: notification created earlier :type notif: alignak.notification.Notification :param contact: contact we want to notify :type contact: alignak.objects.contact.Contact :return: True if the notification is blocked, False otherwise :rtype: bool """ return not contact.want_service_notification(notifways, timeperiods, self.last_chk, self.state, notif.type, self.business_impact, notif.command_call) def get_duration_sec(self): """Get duration in seconds. (cast it before returning) :return: duration in seconds :rtype: int TODO: Move to util or SchedulingItem class """ return str(int(self.duration_sec)) def get_duration(self): """Get duration formatted Format is : "HHh MMm SSs" Example : "10h 20m 40s" :return: Formatted duration :rtype: str TODO: Move to util or SchedulingItem class """ mins, secs = divmod(self.duration_sec, 60) hours, mins = divmod(mins, 60) return "%02dh %02dm %02ds" % (hours, mins, secs) def get_ack_author_name(self): """Get the author of the acknowledgement :return: author :rtype: str """ if self.acknowledgement is None: return '' return getattr(self.acknowledgement, "author", '') def get_ack_comment(self): """Get the comment of the acknowledgement :return: comment :rtype: str """ if self.acknowledgement is None: return '' return getattr(self.acknowledgement, "comment", '') def get_snapshot_command(self): """Wrapper to get the name of the snapshot_command attribute :return: snapshot_command name :rtype: str """ if not getattr(self, 'snapshot_command', None): return '' return self.snapshot_command.get_name() # pylint: disable=R0916 def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished): # pylint: disable=too-many-return-statements """Check if a notification is blocked by the service. 
Conditions are ONE of the following:: * enable_notification is False (global) * not in a notification_period * notifications_enable is False (local) * notification_options is 'n' or matches the state ('UNKNOWN' <=> 'u' ...) (include flapping and downtimes) * state goes ok and type is 'ACKNOWLEDGEMENT' (no sense) * scheduled_downtime_depth > 0 and flapping (host is in downtime) * scheduled_downtime_depth > 1 and not downtime end (deep downtime) * scheduled_downtime_depth > 0 and problem or recovery (host is in downtime) * SOFT state of a problem (we raise notification ony on HARD state) * ACK notification when already ACK (don't raise again ACK) * not flapping notification in a flapping state * business rule smart notifications is enabled and all its children have been acknowledged or are under downtime * linked host is not up * linked host is in downtime :param n_type: notification type :type n_type: :param t_wished: the time we should like to notify the host (mostly now) :type t_wished: float :return: True if ONE of the above condition was met, otherwise False :rtype: bool TODO: Refactor this, a lot of code duplication with Host.is_blocking_notifications """ logger.debug("Checking if a service %s (%s) notification is blocked...", self.get_full_name(), self.state) host = hosts[self.host] if t_wished is None: t_wished = time.time() # TODO # forced notification # pass if this is a custom notification # Block if notifications are program-wide disabled # Block if notifications are disabled for this service # Block if the current status is in the notification_options w,u,c,r,f,s if not self.enable_notifications or \ not self.notifications_enabled or \ 'n' in self.notification_options: logger.debug("Service: %s, notification %s sending is blocked by configuration", self.get_name(), n_type) return True # Does the notification period allow sending out this notification? 
if notification_period is not None and not notification_period.is_time_valid(t_wished): logger.debug("Service: %s, notification %s sending is blocked by globals", self.get_name(), n_type) return True if n_type in (u'PROBLEM', u'RECOVERY') and ( self.state == u'UNKNOWN' and 'u' not in self.notification_options or self.state == u'WARNING' and 'w' not in self.notification_options or self.state == u'CRITICAL' and 'c' not in self.notification_options or self.state == u'OK' and 'r' not in self.notification_options or self.state == u'UNREACHABLE' and 'x' not in self.notification_options): logger.debug("Service: %s, notification %s sending is blocked by options: %s", self.get_name(), n_type, self.notification_options) return True if (n_type in [u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'] and 'f' not in self.notification_options): logger.debug("Service: %s, notification %s sending is blocked by options: %s", n_type, self.get_full_name(), self.notification_options) return True if (n_type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'] and 's' not in self.notification_options): logger.debug("Service: %s, notification %s sending is blocked by options: %s", n_type, self.get_full_name(), self.notification_options) return True # Acknowledgements make no sense when the status is ok/up if n_type in [u'ACKNOWLEDGEMENT'] and self.state == self.ok_up: logger.debug("Host: %s, notification %s sending is blocked by current state", self.get_name(), n_type) return True # Block if host is in a scheduled downtime if host.scheduled_downtime_depth > 0: logger.debug("Service: %s, notification %s sending is blocked by downtime", self.get_name(), n_type) return True # When in deep downtime, only allow end-of-downtime notifications # In depth 1 the downtime just started and can be notified if self.scheduled_downtime_depth > 1 and n_type not in (u'DOWNTIMEEND', u'DOWNTIMECANCELLED'): logger.debug("Service: %s, notification %s sending is blocked by deep downtime", 
self.get_name(), n_type) return True # Block if in a scheduled downtime and a problem arises, or flapping event if self.scheduled_downtime_depth > 0 and n_type in \ [u'PROBLEM', u'RECOVERY', u'ACKNOWLEDGEMENT', u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED']: logger.debug("Service: %s, notification %s sending is blocked by downtime", self.get_name(), n_type) return True # Block if the status is SOFT # Block if the problem has already been acknowledged # Block if flapping # Block if host is down if self.state_type == u'SOFT' and n_type == u'PROBLEM' or \ self.problem_has_been_acknowledged and n_type != u'ACKNOWLEDGEMENT' or \ self.is_flapping and n_type not in [u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'] or \ host.state != host.ok_up: logger.debug("Service: %s, notification %s sending is blocked by soft state, " "acknowledgement, flapping or host DOWN", self.get_name(), n_type) return True # Block if business rule smart notifications is enabled and all its # children have been acknowledged or are under downtime. 
if self.got_business_rule is True \ and self.business_rule_smart_notifications is True \ and self.business_rule_notification_is_blocked(hosts, services) is True \ and n_type == u'PROBLEM': logger.debug("Service: %s, notification %s sending is blocked by business rules", self.get_name(), n_type) return True logger.debug("Service: %s, notification %s sending is not blocked", self.get_name(), n_type) return False def get_short_status(self, hosts, services): """Get the short status of this host :return: "O", "W", "C", "U', or "n/a" based on service state_id or business_rule state :rtype: str """ mapping = { 0: "O", 1: "W", 2: "C", 3: "U", 4: "N", } if self.got_business_rule: return mapping.get(self.business_rule.get_state(hosts, services), "n/a") return mapping.get(self.state_id, "n/a") def get_status(self, hosts, services): """Get the status of this host :return: "OK", "WARNING", "CRITICAL", "UNKNOWN" or "n/a" based on service state_id or business_rule state :rtype: str """ if self.got_business_rule: mapping = { 0: u'OK', 1: u'WARNING', 2: u'CRITICAL', 3: u'UNKNOWN', 4: u'UNREACHABLE', } return mapping.get(self.business_rule.get_state(hosts, services), "n/a") return self.state def get_downtime(self): """Accessor to scheduled_downtime_depth attribute :return: scheduled downtime depth :rtype: str TODO: Move to util or SchedulingItem class """ return str(self.scheduled_downtime_depth)
class Service(SchedulingItem): '''Service class implements monitoring concepts for service. For example it defines parents, check_interval, check_command etc. ''' def __str__(self): pass @property def realm(self): '''Get the service realm... indeed it is the service's host one!''' pass @property def overall_state_id(self): '''Get the service overall state. The service overall state identifier is the service status including: - the monitored state - the acknowledged state - the downtime state The overall state is (prioritized): - a service is not monitored (5) - a service critical or unreachable (4) - a service warning or unknown (3) - a service downtimed (2) - a service acknowledged (1) - a service ok (0) *Note* that services in unknown state are considered as warning, and unreachable ones are considered as critical! Also note that the service state is considered only for HARD state type! ''' pass def fill_predictive_missing_parameters(self): '''define state with initial_state :return: None ''' pass @property def host_service(self): '''Unique key for a service :return: Tuple with host_name and service_description :rtype: tuple ''' pass @property def display_name(self): '''Display_name if defined, else service_description :return: service description or service display_name :rtype: str ''' pass @display_name.setter def display_name(self): '''Setter for display_name attribute :param display_name: value to set :return: None ''' pass def get_host_name(self): '''Returns the service host name :return: service' host name :rtype: str ''' pass def get_full_name(self): '''Get the full name for debugging (host_name/service_description) :return: service full name :rtype: str ''' pass def get_servicegroups(self): '''Accessor to servicegroups attribute :return: servicegroup list object of host :rtype: list ''' pass def get_groupnames(self, sgs): '''Get servicegroups list :return: comma separated list of servicegroups :rtype: str ''' pass def get_hostgroups(self, hosts): 
'''Wrapper to access hostgroups attribute of host attribute :return: service hostgroups (host one) :rtype: alignak.objects.hostgroup.Hostgroups ''' pass def get_host_tags(self, hosts): '''Wrapper to access tags attribute of host attribute :return: service tags (host one) :rtype: alignak.objects.tag.Tags ''' pass def get_service_tags(self): '''Accessor to tags attribute :return: service tags :rtype: alignak.objects.tag.Tags ''' pass def is_correct(self): '''Check if this object configuration is correct :: * Check our own specific properties * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False :rtype: bool ''' pass def duplicate(self, host): '''For a given host, look for all copy we must create for for_each property :param host: alignak host object :type host: alignak.objects.host.Host :return: list :rtype: list ''' pass def set_state_from_exit_status(self, status, notif_period, hosts, services): '''Set the state in UP, WARNING, CRITICAL, UNKNOWN or UNREACHABLE according to the status of a check result. :param status: integer between 0 and 4 :type status: int :return: None ''' pass def is_state(self, status): '''Return True if status match the current service status :param status: status to compare ( "o", "c", "w", "u", "x"). Usually comes from config files :type status: str :return: True if status <=> self.status, otherwise False :rtype: bool ''' pass def last_time_non_ok_or_up(self): '''Get the last time the service was in a non-OK state :return: the nearest last time the service was not ok :rtype: int ''' pass def raise_check_result(self): '''Raise ACTIVE CHECK RESULT entry Example : "ACTIVE SERVICE CHECK: server;DOWN;HARD;1;I don't know what to say..." 
:return: None ''' pass def raise_alert_log_entry(self): '''Raise SERVICE ALERT entry Format is : "SERVICE ALERT: *host.get_name()*;*get_name()*;*state*;*state_type*;*attempt* ;*output*" Example : "SERVICE ALERT: server;Load;DOWN;HARD;1;I don't know what to say..." :return: None ''' pass def raise_initial_state(self): '''Raise SERVICE HOST ALERT entry (info level) Format is : "SERVICE HOST STATE: *host.get_name()*;*get_name()*;*state*;*state_type* ;*attempt*;*output*" Example : "SERVICE HOST STATE: server;Load;DOWN;HARD;1;I don't know what to say..." :return: None ''' pass def raise_notification_log_entry(self, notif, contact, host_ref): '''Raise SERVICE NOTIFICATION entry (critical level) Format is : "SERVICE NOTIFICATION: *contact.get_name()*;*host_name*;*self.get_name()* ;*state*;*command.get_name()*;*output*" Example : "SERVICE NOTIFICATION: superadmin;server;Load;UP;notify-by-rss;no output" :param notif: notification object created by service alert :type notif: alignak.objects.notification.Notification :return: None ''' pass def raise_event_handler_log_entry(self, command): '''Raise SERVICE EVENT HANDLER entry (critical level) Format is : "SERVICE EVENT HANDLER: *host_name*;*self.get_name()*;*state*;*state_type* ;*attempt*;*command.get_name()*" Example : "SERVICE EVENT HANDLER: server;Load;UP;HARD;1;notify-by-rss" :param command: Handler launched :type command: alignak.objects.command.Command :return: None ''' pass def raise_snapshot_log_entry(self, command): '''Raise SERVICE SNAPSHOT entry (critical level) Format is : "SERVICE SNAPSHOT: *host_name*;*self.get_name()*;*state*;*state_type*; *attempt*;*command.get_name()*" Example : "SERVICE SNAPSHOT: server;Load;UP;HARD;1;notify-by-rss" :param command: Snapshot command launched :type command: alignak.objects.command.Command :return: None ''' pass def raise_flapping_start_log_entry(self, change_ratio, threshold): '''Raise SERVICE FLAPPING ALERT START entry (critical level) Format is : "SERVICE FLAPPING ALERT: 
*host_name*;*self.get_name()*;STARTED; Service appears to have started flapping (*change_ratio*% change >= *threshold*% threshold)" Example : "SERVICE FLAPPING ALERT: server;Load;STARTED; Service appears to have started flapping (50.6% change >= 50.0% threshold)" :param change_ratio: percent of changing state :param threshold: threshold (percent) to trigger this log entry :return: None ''' pass def raise_flapping_stop_log_entry(self, change_ratio, threshold): '''Raise SERVICE FLAPPING ALERT STOPPED entry (critical level) Format is : "SERVICE FLAPPING ALERT: *host_name*;*self.get_name()*;STOPPED; Service appears to have started flapping (*change_ratio*% change >= *threshold*% threshold)" Example : "SERVICE FLAPPING ALERT: server;Load;STOPPED; Service appears to have started flapping (50.6% change >= 50.0% threshold)" :param change_ratio: percent of changing state :type change_ratio: float :param threshold: threshold (percent) to trigger this log entry :type threshold: float :return: None ''' pass def raise_no_next_check_log_entry(self): '''Raise no scheduled check entry (warning level) Format is : "I cannot schedule the check for the service '*get_name()*' on host '*host_name*' because there is not future valid time" Example : "I cannot schedule the check for the service 'Load' on host 'Server' because there is not future valid time" :return: None ''' pass def raise_acknowledge_log_entry(self): '''Raise SERVICE ACKNOWLEDGE STARTED entry (critical level) :return: None ''' pass def raise_unacknowledge_log_entry(self): '''Raise SERVICE ACKNOWLEDGE STOPPED entry (critical level) :return: None ''' pass def raise_enter_downtime_log_entry(self): '''Raise SERVICE DOWNTIME ALERT entry (critical level) Format is : "SERVICE DOWNTIME ALERT: *host_name*;*get_name()*;STARTED; Service has entered a period of scheduled downtime" Example : "SERVICE DOWNTIME ALERT: test_host_0;Load;STARTED; Service has entered a period of scheduled downtime" :return: None ''' pass def 
raise_exit_downtime_log_entry(self): '''Raise SERVICE DOWNTIME ALERT entry (critical level) Format is : "SERVICE DOWNTIME ALERT: *host_name*;*get_name()*;STOPPED; Service has entered a period of scheduled downtime" Example : "SERVICE DOWNTIME ALERT: test_host_0;Load;STOPPED; Service has entered a period of scheduled downtime" :return: None ''' pass def raise_cancel_downtime_log_entry(self): '''Raise SERVICE DOWNTIME ALERT entry (critical level) Format is : "SERVICE DOWNTIME ALERT: *host_name*;*get_name()*;CANCELLED; Service has entered a period of scheduled downtime" Example : "SERVICE DOWNTIME ALERT: test_host_0;Load;CANCELLED; Service has entered a period of scheduled downtime" :return: None ''' pass def manage_stalking(self, check): '''Check if the service need stalking or not (immediate recheck) If one stalking_options matches the exit_status ('o' <=> 0 ...) then stalk is needed Raise a log entry (info level) if stalk is needed :param check: finished check (check.status == 'waitconsume') :type check: alignak.check.Check :return: None ''' pass def get_data_for_checks(self, hosts): '''Get data for a check :return: list containing the service and the linked host :rtype: list ''' pass def get_data_for_event_handler(self, hosts): '''Get data for an event handler :return: list containing the service and the linked host :rtype: list ''' pass def get_data_for_notifications(self, contact, notif, host_ref): '''Get data for a notification :param contact: The contact to return :type contact: :param notif: the notification to return :type notif: :return: list containing the service, the host and the given parameters :rtype: list ''' pass def notification_is_blocked_by_contact(self, notifways, timeperiods, notif, contact): '''Check if the notification is blocked by this contact. 
:param notifways: concerned notification ways :type notifways: alignak.objects.notificationway.NotificationWays :param timeperiods: concerned timeperiods :type timeperiods: alignak.objects.timeperiod.Timeperiods :param notif: notification created earlier :type notif: alignak.notification.Notification :param contact: contact we want to notify :type contact: alignak.objects.contact.Contact :return: True if the notification is blocked, False otherwise :rtype: bool ''' pass def get_duration_sec(self): '''Get duration in seconds. (cast it before returning) :return: duration in seconds :rtype: int TODO: Move to util or SchedulingItem class ''' pass def get_duration_sec(self): '''Get duration formatted Format is : "HHh MMm SSs" Example : "10h 20m 40s" :return: Formatted duration :rtype: str TODO: Move to util or SchedulingItem class ''' pass def get_ack_author_name(self): '''Get the author of the acknowledgement :return: author :rtype: str ''' pass def get_ack_comment(self): '''Get the comment of the acknowledgement :return: comment :rtype: str ''' pass def get_snapshot_command(self): '''Wrapper to get the name of the snapshot_command attribute :return: snapshot_command name :rtype: str ''' pass def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished): '''Check if a notification is blocked by the service. Conditions are ONE of the following:: * enable_notification is False (global) * not in a notification_period * notifications_enable is False (local) * notification_options is 'n' or matches the state ('UNKNOWN' <=> 'u' ...) 
(include flapping and downtimes) * state goes ok and type is 'ACKNOWLEDGEMENT' (no sense) * scheduled_downtime_depth > 0 and flapping (host is in downtime) * scheduled_downtime_depth > 1 and not downtime end (deep downtime) * scheduled_downtime_depth > 0 and problem or recovery (host is in downtime) * SOFT state of a problem (we raise notification ony on HARD state) * ACK notification when already ACK (don't raise again ACK) * not flapping notification in a flapping state * business rule smart notifications is enabled and all its children have been acknowledged or are under downtime * linked host is not up * linked host is in downtime :param n_type: notification type :type n_type: :param t_wished: the time we should like to notify the host (mostly now) :type t_wished: float :return: True if ONE of the above condition was met, otherwise False :rtype: bool TODO: Refactor this, a lot of code duplication with Host.is_blocking_notifications ''' pass def get_short_status(self, hosts, services): '''Get the short status of this host :return: "O", "W", "C", "U', or "n/a" based on service state_id or business_rule state :rtype: str ''' pass def get_status(self, hosts, services): '''Get the status of this host :return: "OK", "WARNING", "CRITICAL", "UNKNOWN" or "n/a" based on service state_id or business_rule state :rtype: str ''' pass def get_downtime(self): '''Accessor to scheduled_downtime_depth attribute :return: scheduled downtime depth :rtype: str TODO: Move to util or SchedulingItem class ''' pass
53
47
21
2
11
7
3
0.58
1
6
1
0
47
11
47
160
1,197
158
661
131
608
385
377
125
329
13
4
4
152
3,990
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/resultmodulation.py
alignak.objects.resultmodulation.Resultmodulations
class Resultmodulations(Items): """Resultmodulations class allowed to handle easily several CheckModulation objects """ name_property = "resultmodulation_name" inner_class = Resultmodulation def linkify(self, timeperiods): """Wrapper for linkify_rm_by_tp Replace check_period by real Timeperiod object into each Resultmodulation :param timeperiods: timeperiods to link to :type timeperiods: alignak.objects.timeperiod.Timeperiods :return: None """ self.linkify_with_timeperiods(timeperiods, 'modulation_period')
class Resultmodulations(Items): '''Resultmodulations class allowed to handle easily several CheckModulation objects ''' def linkify(self, timeperiods): '''Wrapper for linkify_rm_by_tp Replace check_period by real Timeperiod object into each Resultmodulation :param timeperiods: timeperiods to link to :type timeperiods: alignak.objects.timeperiod.Timeperiods :return: None ''' pass
2
2
9
1
2
6
1
1.6
1
0
0
0
1
0
1
46
16
3
5
4
3
8
5
4
3
1
2
0
1
3,991
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/service.py
alignak.objects.service.Services
class Services(SchedulingItems): """Class for the services lists. It's mainly for configuration """ inner_class = Service def add_template(self, template): """ Adds and index a template into the `templates` container. This implementation takes into account that a service has two naming attribute: `host_name` and `service_description`. :param template: The template to add :type template: :return: None """ name = template.get_name() service_description = getattr(template, 'service_description', '') host_name = getattr(template, 'host_name', '') logger.debug("Adding a %s template: host_name: %s, name: %s, service_description: %s", self.inner_class.my_type, host_name, name, service_description) if not name and not host_name: template.add_error("a %s template has been defined without name nor host_name. " "from: %s" % (self.inner_class.my_type, template.imported_from)) elif not name and not service_description: template.add_error("a %s template has been defined without name nor " "service_description. from: %s" % (self.inner_class.my_type, template.imported_from)) elif not name: # If name is not defined, use the host_name_service_description as name (fix #791) setattr(template, 'name', "%s_%s" % (host_name, service_description)) template = self.index_template(template) elif name: template = self.index_template(template) self.templates[template.uuid] = template logger.debug('\tAdded service template #%d %s', len(self.templates), template) def apply_inheritance(self): """ For all items and templates inherit properties and custom variables. 
:return: None """ super(Services, self).apply_inheritance() # add_item only ensure we can build a key for services later (after explode) for item in list(self.items.values()): self.add_item(item, False) def find_srvs_by_hostname(self, host_name): """Get all services from a host based on a host_name :param host_name: the host name we want services :type host_name: str :return: list of services :rtype: list[alignak.objects.service.Service] """ if hasattr(self, 'hosts'): host = self.hosts.find_by_name(host_name) if host is None: return None return host.get_services() return None def find_srv_by_name_and_hostname(self, host_name, service_description): """Get a specific service based on a host_name and service_description :param host_name: host name linked to needed service :type host_name: str :param service_description: service name we need :type service_description: str :return: the service found or None :rtype: alignak.objects.service.Service """ key = (host_name, service_description) return self.name_to_item.get(key, None) def linkify(self, hosts, commands, timeperiods, contacts, # pylint: disable=R0913 resultmodulations, businessimpactmodulations, escalations, servicegroups, checkmodulations, macromodulations): """Create link between objects:: * service -> host * service -> command * service -> timeperiods * service -> contacts :param hosts: hosts to link :type hosts: alignak.objects.host.Hosts :param timeperiods: timeperiods to link :type timeperiods: alignak.objects.timeperiod.Timeperiods :param commands: commands to link :type commands: alignak.objects.command.Commands :param contacts: contacts to link :type contacts: alignak.objects.contact.Contacts :param resultmodulations: resultmodulations to link :type resultmodulations: alignak.objects.resultmodulation.Resultmodulations :param businessimpactmodulations: businessimpactmodulations to link :type businessimpactmodulations: alignak.objects.businessimpactmodulation.Businessimpactmodulations :param escalations: 
escalations to link :type escalations: alignak.objects.escalation.Escalations :param servicegroups: servicegroups to link :type servicegroups: alignak.objects.servicegroup.Servicegroups :param checkmodulations: checkmodulations to link :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations :param macromodulations: macromodulations to link :type macromodulations: alignak.objects.macromodulation.Macromodulations :return: None """ self.linkify_with_timeperiods(timeperiods, 'notification_period') self.linkify_with_timeperiods(timeperiods, 'check_period') self.linkify_with_timeperiods(timeperiods, 'maintenance_period') self.linkify_with_timeperiods(timeperiods, 'snapshot_period') self.linkify_s_by_hst(hosts) self.linkify_s_by_sg(servicegroups) self.linkify_with_commands(commands, 'check_command') self.linkify_with_commands(commands, 'event_handler') self.linkify_with_commands(commands, 'snapshot_command') self.linkify_with_contacts(contacts) self.linkify_with_result_modulations(resultmodulations) self.linkify_with_business_impact_modulations(businessimpactmodulations) # WARNING: all escalations will not be link here # (just the escalation here, not serviceesca or hostesca). # This last one will be link in escalations linkify. 
self.linkify_with_escalations(escalations) self.linkify_with_check_modulations(checkmodulations) self.linkify_with_macro_modulations(macromodulations) def override_properties(self, hosts): """Handle service_overrides property for hosts ie : override properties for relevant services :param hosts: hosts we need to apply override properties :type hosts: alignak.objects.host.Hosts :return: None """ ovr_re = re.compile(r'^([^,]+),([^\s]+)\s+(.*)$') ovr_hosts = [h for h in hosts if getattr(h, 'service_overrides', None)] for host in ovr_hosts: # We're only looking for hosts having service overrides defined if isinstance(host.service_overrides, list): service_overrides = host.service_overrides else: service_overrides = [host.service_overrides] for ovr in service_overrides: # Checks service override syntax match = ovr_re.search(ovr) if match is None: host.add_error("invalid service override syntax: %s" % ovr) continue sdescr, prop, value = match.groups() # Looks for corresponding service service = self.find_srv_by_name_and_hostname(getattr(host, "host_name", ""), sdescr) if service is None: host.add_error("trying to override property '%s' on service '%s' " "but it's unknown for this host" % (prop, sdescr)) continue # Checks if override is allowed excludes = ['host_name', 'service_description', 'use', 'servicegroups', 'trigger_name'] if prop in excludes: host.add_error("trying to override '%s', a forbidden property " "for service '%s'" % (prop, sdescr)) continue # Pythonize the value because here value is str. 
setattr(service, prop, service.properties[prop].pythonize(value)) def optimize_service_search(self, hosts): """Setter for hosts attribute :param hosts: value to set :type hosts: alignak.objects.host.Hosts :return: """ self.hosts = hosts def linkify_s_by_hst(self, hosts): """Link services with their parent host :param hosts: Hosts to look for simple host :type hosts: alignak.objects.host.Hosts :return: None """ for serv in self: # If we do not have a host_name, we set it as # a template element to delete. (like Nagios) if not hasattr(serv, 'host_name'): serv.host = None continue try: hst_name = serv.host_name # The new member list, in id hst = hosts.find_by_name(hst_name) # Let the host know we are his service if hst is not None: serv.host = hst.uuid hst.add_service_link(serv.uuid) else: # Ok, the host do not exists! err = "Warning: the service '%s' got an invalid host_name '%s'" % \ (serv.get_name(), hst_name) serv.configuration_warnings.append(err) continue except AttributeError: pass # Will be catch at the is_correct moment def linkify_s_by_sg(self, servicegroups): """Link services with servicegroups :param servicegroups: Servicegroups :type servicegroups: alignak.objects.servicegroup.Servicegroups :return: None """ for service in self: new_servicegroups = [] if hasattr(service, 'servicegroups') and getattr(service, 'servicegroups'): for sg_name in service.servicegroups: sg_name = sg_name.strip() servicegroup = servicegroups.find_by_name(sg_name) if servicegroup is not None: new_servicegroups.append(servicegroup.uuid) else: service.add_error("Error: the servicegroup '%s' of the service '%s' " "is unknown" % (sg_name, service.get_full_name())) service.servicegroups = new_servicegroups def delete_services_by_id(self, ids): """Delete a list of services :param ids: ids list to delete :type ids: list :return: None """ for s_id in ids: del self[s_id] def apply_implicit_inheritance(self, hosts): """Apply implicit inheritance for special properties: contact_groups, 
notification_interval , notification_period So service will take info from host if necessary :param hosts: hosts list needed to look for a simple host :type hosts: alignak.objects.host.Hosts :return: None """ for prop in ('contacts', 'contact_groups', 'notification_interval', 'notification_period', 'resultmodulations', 'business_impact_modulations', 'escalations', 'poller_tag', 'reactionner_tag', 'check_period', 'business_impact', 'maintenance_period'): for serv in self: if hasattr(serv, 'host_name') and not getattr(serv, prop, None): host = hosts.find_by_name(serv.host_name) if host is not None and hasattr(host, prop): logger.debug("Implicit inheritance for %s/%s: %s = %s", serv.host_name, serv, prop, getattr(host, prop)) setattr(serv, prop, getattr(host, prop)) def apply_dependencies(self, hosts): """Loop over services and fill host dependency :return: None """ for service in self: if service.host and service.host_dependency_enabled: host = hosts[service.host] if host.active_checks_enabled: # Add host in the list service.act_depend_of.append( (service.host, ['d', 'x', 's', 'f'], '', True) ) # Add service in the host host.act_depend_of_me.append( (service.uuid, ['d', 'x', 's', 'f'], '', True) ) # Parent / children relations between host and service if service.uuid not in host.child_dependencies: host.child_dependencies.append(service.uuid) if service.host not in service.parent_dependencies: service.parent_dependencies.append(service.host) def clean(self): """Remove services without host object linked to Note that this should not happen! :return: None """ to_del = [] for service in self: if not service.host: to_del.append(service.uuid) for service_uuid in to_del: del self.items[service_uuid] super(Services, self).clean() def explode_services_from_hosts(self, hosts, service, hnames): """ Explodes a service based on a list of hosts. 
:param hosts: The hosts container :type hosts: :param service: The base service to explode :type service: :param hnames: The host_name list to explode service on :type hnames: str :return: None """ duplicate_for_hosts = [] # get the list of our host_names if more than 1 not_hosts = [] # the list of !host_name so we remove them after for hname in hnames: hname = hname.strip() # If the name begin with a !, we put it in # the not list if hname.startswith('!'): not_hosts.append(hname[1:]) else: # the standard list duplicate_for_hosts.append(hname) # remove duplicate items from duplicate_for_hosts: duplicate_for_hosts = list(set(duplicate_for_hosts)) # Ok now we clean the duplicate_for_hosts with all hosts # of the not for hname in not_hosts: try: duplicate_for_hosts.remove(hname) except IndexError: pass # Now we duplicate the service for all host_names for hname in duplicate_for_hosts: host = hosts.find_by_name(hname) if host is None: service.add_error("Error: The hostname %s is unknown for the service %s!" % (hname, service.get_name())) continue if host.is_excluded_for(service): continue new_s = service.copy() new_s.host_name = hname self.add_item(new_s) # pylint: disable=inconsistent-return-statements def _local_create_service(self, hosts, host_name, service): """Create a new service based on a host_name and service instance. :param hosts: The hosts items instance. :type hosts: alignak.objects.host.Hosts :param host_name: The host_name to create a new service. :type host_name: str :param service: The service to be used as template. :type service: Service :return: The new service created. :rtype: alignak.objects.service.Service """ host = hosts.find_by_name(host_name.strip()) if host.is_excluded_for(service): return None # Creates a real service instance from the template new_s = service.copy() new_s.host_name = host_name new_s.register = True # Indicate which template is used. 
new_s.imported_from = u'alignak-template-' + service.get_full_name() self.add_item(new_s) return new_s def explode_services_from_templates(self, hosts, service_template): """ Explodes services from templates. All hosts holding the specified templates are bound with the service. :param hosts: The hosts container. :type hosts: alignak.objects.host.Hosts :param service_template: The service to explode. :type service_template: alignak.objects.service.Service :return: None """ hname = getattr(service_template, "host_name", None) if not hname: logger.debug("Service template %s is declared without an host_name", service_template.get_name()) return logger.debug("Explode services %s for the host: %s", service_template.get_name(), hname) # Now really create the services if is_complex_expr(hname): hnames = self.evaluate_hostgroup_expression( hname.strip(), hosts, hosts.templates, look_in='templates') for name in hnames: self._local_create_service(hosts, name, service_template) else: hnames = [n.strip() for n in hname.split(',') if n.strip()] for hname in hnames: for name in hosts.find_hosts_that_use_template(hname): self._local_create_service(hosts, name, service_template) def explode_services_duplicates(self, hosts, service): """ Explodes services holding a `duplicate_foreach` clause. :param hosts: The hosts container :type hosts: alignak.objects.host.Hosts :param service: The service to explode :type service: alignak.objects.service.Service """ hname = getattr(service, "host_name", None) if hname is None: return # the generator case, we must create several new services # we must find our host, and get all key:value we need host = hosts.find_by_name(hname.strip()) if host is None: service.add_error('Error: The hostname %s is unknown for the service %s!' 
% (hname, service.get_name())) return # Duplicate services for new_s in service.duplicate(host): if host.is_excluded_for(new_s): continue # Adds concrete instance self.add_item(new_s) @staticmethod def register_service_into_servicegroups(service, servicegroups): """ Registers a service into the service groups declared in its `servicegroups` attribute. :param service: The service to register :type service: :param servicegroups: The servicegroups container :type servicegroups: :return: None """ if hasattr(service, 'service_description'): sname = service.service_description shname = getattr(service, 'host_name', '') if hasattr(service, 'servicegroups'): # Todo: See if we can remove this if if isinstance(service.servicegroups, list): sgs = service.servicegroups else: sgs = service.servicegroups.split(',') for servicegroup in sgs: servicegroups.add_member([shname, sname], servicegroup.strip()) @staticmethod def register_service_dependencies(service, servicedependencies): """ Registers a service dependencies. 
:param service: The service to register :type service: :param servicedependencies: The servicedependencies container :type servicedependencies: :return: None """ # We explode service_dependencies into Servicedependency svc_deps = [d.strip() for d in getattr(service, "service_dependencies", [])] i = 0 host_name = service.host_name for elt in svc_deps: # %2=0 is for hosts, %2!=0 is for service_description if i % 2 == 0: host_name = elt continue service_description = elt # we can register it (service) (depend on) -> (hname, desc) # If we do not have enough data for service, it'service no use if hasattr(service, 'service_description') and hasattr(service, 'host_name'): servicedependencies.add_service_dependency( service.host_name, service.service_description, host_name, service_description) i += 1 # We create new service if necessary (host groups and co) def explode(self, hosts, hostgroups, contactgroups, servicegroups, servicedependencies): # pylint: disable=too-many-locals """ Explodes services, from host, hostgroups, contactgroups, servicegroups and dependencies. 
:param hosts: The hosts container :type hosts: [alignak.object.host.Host] :param hostgroups: The hosts goups container :type hostgroups: [alignak.object.hostgroup.Hostgroup] :param contactgroups: The contacts goups container :type contactgroups: [alignak.object.contactgroup.Contactgroup] :param servicegroups: The services goups container :type servicegroups: [alignak.object.servicegroup.Servicegroup] :param servicedependencies: The services dependencies container :type servicedependencies: [alignak.object.servicedependency.Servicedependency] :return: None """ # Then for every service create a copy of the service with just the host # because we are adding services, we can't just loop in it itemkeys = list(self.items.keys()) for s_id in itemkeys: serv = self.items[s_id] # items::explode_host_groups_into_hosts # take all hosts from our hostgroup_name into our host_name property self.explode_host_groups_into_hosts(serv, hosts, hostgroups) # items::explode_contact_groups_into_contacts # take all contacts from our contact_groups into our contact property self.explode_contact_groups_into_contacts(serv, contactgroups) hnames = getattr(serv, "host_name", '') hnames = list(set([n.strip() for n in hnames.split(',') if n.strip()])) # hnames = strip_and_uniq(hnames) # We will duplicate if we have multiple host_name # or if we are a template (so a clean service) if len(hnames) == 1: self.index_item(serv) else: if len(hnames) >= 2: self.explode_services_from_hosts(hosts, serv, hnames) # Delete expanded source service, even if some errors exist self.remove_item(serv) for s_id in self.templates: template = self.templates[s_id] self.explode_contact_groups_into_contacts(template, contactgroups) self.explode_services_from_templates(hosts, template) # Explode services that have a duplicate_foreach clause duplicates = [serv.uuid for serv in self if getattr(serv, 'duplicate_foreach', '')] for s_id in duplicates: serv = self.items[s_id] self.explode_services_duplicates(hosts, serv) if not 
serv.configuration_errors: self.remove_item(serv) to_remove = [] for service in self: host = hosts.find_by_name(service.host_name) if host and host.is_excluded_for(service): to_remove.append(service) for service in to_remove: self.remove_item(service) # Servicegroups property need to be fulfill for got the information # And then just register to this service_group for serv in self: self.register_service_into_servicegroups(serv, servicegroups) self.register_service_dependencies(serv, servicedependencies) def fill_predictive_missing_parameters(self): """Loop on services and call Service.fill_predictive_missing_parameters() :return: None """ for service in self: service.fill_predictive_missing_parameters()
class Services(SchedulingItems): '''Class for the services lists. It's mainly for configuration ''' def add_template(self, template): ''' Adds and index a template into the `templates` container. This implementation takes into account that a service has two naming attribute: `host_name` and `service_description`. :param template: The template to add :type template: :return: None ''' pass def apply_inheritance(self): ''' For all items and templates inherit properties and custom variables. :return: None ''' pass def find_srvs_by_hostname(self, host_name): '''Get all services from a host based on a host_name :param host_name: the host name we want services :type host_name: str :return: list of services :rtype: list[alignak.objects.service.Service] ''' pass def find_srv_by_name_and_hostname(self, host_name, service_description): '''Get a specific service based on a host_name and service_description :param host_name: host name linked to needed service :type host_name: str :param service_description: service name we need :type service_description: str :return: the service found or None :rtype: alignak.objects.service.Service ''' pass def linkify(self, hosts, commands, timeperiods, contacts, # pylint: disable=R0913 resultmodulations, businessimpactmodulations, escalations, servicegroups, checkmodulations, macromodulations): '''Create link between objects:: * service -> host * service -> command * service -> timeperiods * service -> contacts :param hosts: hosts to link :type hosts: alignak.objects.host.Hosts :param timeperiods: timeperiods to link :type timeperiods: alignak.objects.timeperiod.Timeperiods :param commands: commands to link :type commands: alignak.objects.command.Commands :param contacts: contacts to link :type contacts: alignak.objects.contact.Contacts :param resultmodulations: resultmodulations to link :type resultmodulations: alignak.objects.resultmodulation.Resultmodulations :param businessimpactmodulations: businessimpactmodulations to link :type 
businessimpactmodulations: alignak.objects.businessimpactmodulation.Businessimpactmodulations :param escalations: escalations to link :type escalations: alignak.objects.escalation.Escalations :param servicegroups: servicegroups to link :type servicegroups: alignak.objects.servicegroup.Servicegroups :param checkmodulations: checkmodulations to link :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations :param macromodulations: macromodulations to link :type macromodulations: alignak.objects.macromodulation.Macromodulations :return: None ''' pass def override_properties(self, hosts): '''Handle service_overrides property for hosts ie : override properties for relevant services :param hosts: hosts we need to apply override properties :type hosts: alignak.objects.host.Hosts :return: None ''' pass def optimize_service_search(self, hosts): '''Setter for hosts attribute :param hosts: value to set :type hosts: alignak.objects.host.Hosts :return: ''' pass def linkify_s_by_hst(self, hosts): '''Link services with their parent host :param hosts: Hosts to look for simple host :type hosts: alignak.objects.host.Hosts :return: None ''' pass def linkify_s_by_sg(self, servicegroups): '''Link services with servicegroups :param servicegroups: Servicegroups :type servicegroups: alignak.objects.servicegroup.Servicegroups :return: None ''' pass def delete_services_by_id(self, ids): '''Delete a list of services :param ids: ids list to delete :type ids: list :return: None ''' pass def apply_implicit_inheritance(self, hosts): '''Apply implicit inheritance for special properties: contact_groups, notification_interval , notification_period So service will take info from host if necessary :param hosts: hosts list needed to look for a simple host :type hosts: alignak.objects.host.Hosts :return: None ''' pass def apply_dependencies(self, hosts): '''Loop over services and fill host dependency :return: None ''' pass def clean(self): '''Remove services without host object linked to 
Note that this should not happen! :return: None ''' pass def explode_services_from_hosts(self, hosts, service, hnames): ''' Explodes a service based on a list of hosts. :param hosts: The hosts container :type hosts: :param service: The base service to explode :type service: :param hnames: The host_name list to explode service on :type hnames: str :return: None ''' pass def _local_create_service(self, hosts, host_name, service): '''Create a new service based on a host_name and service instance. :param hosts: The hosts items instance. :type hosts: alignak.objects.host.Hosts :param host_name: The host_name to create a new service. :type host_name: str :param service: The service to be used as template. :type service: Service :return: The new service created. :rtype: alignak.objects.service.Service ''' pass def explode_services_from_templates(self, hosts, service_template): ''' Explodes services from templates. All hosts holding the specified templates are bound with the service. :param hosts: The hosts container. :type hosts: alignak.objects.host.Hosts :param service_template: The service to explode. :type service_template: alignak.objects.service.Service :return: None ''' pass def explode_services_duplicates(self, hosts, service): ''' Explodes services holding a `duplicate_foreach` clause. :param hosts: The hosts container :type hosts: alignak.objects.host.Hosts :param service: The service to explode :type service: alignak.objects.service.Service ''' pass @staticmethod def register_service_into_servicegroups(service, servicegroups): ''' Registers a service into the service groups declared in its `servicegroups` attribute. :param service: The service to register :type service: :param servicegroups: The servicegroups container :type servicegroups: :return: None ''' pass @staticmethod def register_service_dependencies(service, servicedependencies): ''' Registers a service dependencies. 
:param service: The service to register :type service: :param servicedependencies: The servicedependencies container :type servicedependencies: :return: None ''' pass def explode_services_from_hosts(self, hosts, service, hnames): ''' Explodes services, from host, hostgroups, contactgroups, servicegroups and dependencies. :param hosts: The hosts container :type hosts: [alignak.object.host.Host] :param hostgroups: The hosts goups container :type hostgroups: [alignak.object.hostgroup.Hostgroup] :param contactgroups: The contacts goups container :type contactgroups: [alignak.object.contactgroup.Contactgroup] :param servicegroups: The services goups container :type servicegroups: [alignak.object.servicegroup.Servicegroup] :param servicedependencies: The services dependencies container :type servicedependencies: [alignak.object.servicedependency.Servicedependency] :return: None ''' pass def fill_predictive_missing_parameters(self): '''Loop on services and call Service.fill_predictive_missing_parameters() :return: None ''' pass
24
22
26
2
13
10
4
0.79
1
5
0
0
19
1
21
73
569
70
282
92
256
224
244
88
222
11
4
4
90
3,992
Alignak-monitoring/alignak
Alignak-monitoring_alignak/alignak/objects/reactionnerlink.py
alignak.objects.reactionnerlink.ReactionnerLink
class ReactionnerLink(SatelliteLink): """ Class to manage the reactionner information """ my_type = 'reactionner' my_name_property = "%s_name" % my_type properties = SatelliteLink.properties.copy() properties.update({ 'type': StringProp(default='reactionner', fill_brok=[FULL_STATUS], to_send=True), 'reactionner_name': StringProp(default='', fill_brok=[FULL_STATUS]), 'port': IntegerProp(default=7769, fill_brok=[FULL_STATUS], to_send=True), 'reactionner_tags': ListProp(default=['None'], to_send=True), })
class ReactionnerLink(SatelliteLink): ''' Class to manage the reactionner information ''' pass
1
1
0
0
0
0
0
0.21
1
0
0
0
0
0
0
70
18
1
14
4
13
3
5
4
4
0
4
0
0
3,993
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests_integ/modules/arbiter_configuration/arbiter_configuration.py
tests_integ.modules.arbiter_configuration.arbiter_configuration.AlignakBackendArbiter
class AlignakBackendArbiter(BaseModule): # pylint: disable=too-many-public-methods """ This class is used to get configuration from alignak-backend """ def __init__(self, mod_conf): """Module initialization mod_conf is a dictionary that contains: - all the variables declared in the module configuration file - a 'properties' value that is the module properties as defined globally in this file :param mod_conf: module configuration file as a dictionary """ BaseModule.__init__(self, mod_conf) # pylint: disable=global-statement global logger logger = logging.getLogger('alignak.module.%s' % self.alias) logger.debug("inner properties: %s", self.__dict__) logger.debug("received configuration: %s", mod_conf.__dict__) self.my_arbiter = None self.bypass_verify_mode = int(getattr(mod_conf, 'bypass_verify_mode', 0)) == 1 logger.info("bypass objects loading when Arbiter is in verify mode: %s", self.bypass_verify_mode) self.verify_modification = int(getattr(mod_conf, 'verify_modification', 5)) logger.info("configuration reload check period: %s minutes", self.verify_modification) self.action_check = int(getattr(mod_conf, 'action_check', 15)) logger.info("actions check period: %s seconds", self.action_check) self.daemons_state = int(getattr(mod_conf, 'daemons_state', 60)) logger.info("daemons state update period: %s seconds", self.daemons_state) self.next_check = 0 self.next_action_check = 0 self.next_daemons_state = 0 # Configuration load/reload self.backend_date_format = "%a, %d %b %Y %H:%M:%S GMT" self.time_loaded_conf = datetime.utcnow().strftime(self.backend_date_format) self.configuration_reload_required = False self.configuration_reload_changelog = [] self.configraw = {} self.highlevelrealm = { 'level': 30000, 'name': '' } self.daemonlist = {'arbiter': {}, 'scheduler': {}, 'poller': {}, 'reactionner': {}, 'receiver': {}, 'broker': {}} self.config = {'commands': [], 'timeperiods': [], 'hosts': [], 'hostgroups': [], 'services': [], 'contacts': [], 'contactgroups': [], 
'servicegroups': [], 'realms': [], 'hostdependencies': [], 'hostescalations': [], 'servicedependencies': [], 'serviceescalations': [], 'triggers': []} self.default_tp_always = None self.default_tp_never = None self.default_host_check_command = None self.default_service_check_command = None self.default_user = None self.alignak_configuration = {} # Common functions def do_loop_turn(self): """This function is called/used when you need a module with a loop function (and use the parameter 'external': True) """ logger.info("In loop") time.sleep(1) def hook_read_configuration(self, arbiter): """Hook in arbiter used on configuration parsing start. This is useful to get our arbiter object and its parameters. :param arbiter: alignak.daemons.arbiterdaemon.Arbiter :type arbiter: object :return: None """ self.my_arbiter = arbiter def get_alignak_configuration(self): """Get Alignak configuration from alignak-backend This function is an Arbiter hook called by the arbiter during its configuration loading. :return: alignak configuration parameters :rtype: dict """ self.alignak_configuration = {} start_time = time.time() try: logger.info("Loading Alignak configuration...") self.alignak_configuration = { 'name': 'my_alignak', 'alias': 'Test alignak configuration', # Boolean fields 'notifications_enabled': True, 'flap_detection_enabled': False, # Commands fields 'host_perfdata_command': 'None', 'service_perfdata_command': None, 'global_host_event_handler': 'check-host-alive', 'global_service_event_handler': 'check_service', '_TEST1': 'Test an extra non declared field', 'TEST2': 'One again - Test an extra non declared field', 'TEST3': 'And again - Test an extra non declared field', '_updated': 123456789, '_realm': None, '_sub_realm': True } except BackendException as exp: logger.warning("Alignak backend is not available for reading configuration. 
" "Backend communication error.") logger.debug("Exception: %s", exp) self.backend_connected = False return self.alignak_configuration self.time_loaded_conf = datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT") now = time.time() logger.info("Alignak configuration loaded in %s seconds", (now - start_time)) return self.alignak_configuration def hook_tick(self, arbiter): # pylint: disable=too-many-nested-blocks """Hook in arbiter used to check if configuration has changed in the backend since last configuration loaded :param arbiter: alignak.daemons.arbiterdaemon.Arbiter :type arbiter: object :return: None """ if not self.backend_connected: self.getToken() if self.raise_backend_alert(errors_count=10): logger.warning("Alignak backend connection is not available. " "Periodical actions are disabled: configuration change checking, " "ack/downtime/forced check, and daemons state updates.") return try: now = int(time.time()) if now > self.next_check: logger.info("Check if system configuration changed in the backend...") logger.debug("Now is: %s", datetime.utcnow().strftime(self.backend_date_format)) logger.debug("Last configuration loading time is: %s", self.time_loaded_conf) # todo: we should find a way to declare in the backend schema # that a resource endpoint is concerned with this feature. 
Something like: # 'arbiter_reload_check': True, # 'schema': {...} logger.debug("Check if system configuration changed in the backend...") resources = [ 'realm', 'command', 'timeperiod', 'usergroup', 'user', 'hostgroup', 'host', 'hostdependency', 'hostescalation', 'servicegroup', 'service', 'servicedependency', 'serviceescalation' ] self.configuration_reload_required = False for resource in resources: ret = self.backend.get(resource, {'where': '{"_updated":{"$gte": "' + self.time_loaded_conf + '"}}'}) if ret['_meta']['total'] > 0: logger.info(" - backend updated resource: %s, count: %d", resource, ret['_meta']['total']) self.configuration_reload_required = True for updated in ret['_items']: logger.debug(" -> updated: %s", updated) exists = [log for log in self.configuration_reload_changelog if log['resource'] == resource and log['item']['_id'] == updated['_id'] and log['item']['_updated'] == updated['_updated']] if not exists: self.configuration_reload_changelog.append({"resource": resource, "item": updated}) if self.configuration_reload_required: logger.warning("Hey, we must reload configuration from the backend!") try: with open(arbiter.pidfile, 'r') as f: arbiter_pid = f.readline() os.kill(int(arbiter_pid), signal.SIGHUP) message = "The configuration reload notification was " \ "raised to the arbiter (pid=%s)." % arbiter_pid self.configuration_reload_changelog.append({"resource": "backend-log", "item": { "_updated": now, "level": "INFO", "message": message }}) logger.error(message) except IOError: message = "The arbiter pid file (%s) is not available. " \ "Configuration reload notification was not raised." \ % arbiter.pidfile self.configuration_reload_changelog.append({"resource": "backend-log", "item": { "_updated": now, "level": "ERROR", "message": message }}) logger.error(message) except OSError: message = "The arbiter pid (%s) stored in file (%s) is not for an " \ "existing process. " \ "Configuration reload notification was not raised." 
\ % (arbiter_pid, arbiter.pidfile) self.configuration_reload_changelog.append({"resource": "backend-log", "item": { "_updated": now, "level": "ERROR", "message": message }}) logger.error(message) else: logger.debug("No changes found") self.next_check = now + (60 * self.verify_modification) logger.debug( "next configuration reload check in %s seconds ---", (self.next_check - now) ) if now > self.next_action_check: logger.debug("Check if acknowledgements are required...") self.get_acknowledge(arbiter) logger.debug("Check if downtime scheduling are required...") self.get_downtime(arbiter) logger.debug("Check if re-checks are required...") self.get_forcecheck(arbiter) self.next_action_check = now + self.action_check logger.debug("next actions check in %s seconds ---", (self.next_action_check - int(now))) if now > self.next_daemons_state: logger.debug("Update daemons state in the backend...") self.update_daemons_state(arbiter) self.next_daemons_state = now + self.daemons_state logger.debug( "next update daemons state in %s seconds ---", (self.next_daemons_state - int(now)) ) except Exception as exp: logger.warning("hook_tick exception: %s", str(exp)) logger.debug("Exception: %s", exp)
class AlignakBackendArbiter(BaseModule): ''' This class is used to get configuration from alignak-backend ''' def __init__(self, mod_conf): '''Module initialization mod_conf is a dictionary that contains: - all the variables declared in the module configuration file - a 'properties' value that is the module properties as defined globally in this file :param mod_conf: module configuration file as a dictionary ''' pass def do_loop_turn(self): '''This function is called/used when you need a module with a loop function (and use the parameter 'external': True) ''' pass def hook_read_configuration(self, arbiter): '''Hook in arbiter used on configuration parsing start. This is useful to get our arbiter object and its parameters. :param arbiter: alignak.daemons.arbiterdaemon.Arbiter :type arbiter: object :return: None ''' pass def get_alignak_configuration(self): '''Get Alignak configuration from alignak-backend This function is an Arbiter hook called by the arbiter during its configuration loading. :return: alignak configuration parameters :rtype: dict ''' pass def hook_tick(self, arbiter): '''Hook in arbiter used to check if configuration has changed in the backend since last configuration loaded :param arbiter: alignak.daemons.arbiterdaemon.Arbiter :type arbiter: object :return: None ''' pass
6
6
50
5
38
7
4
0.2
1
5
0
0
5
23
5
25
260
30
191
43
184
39
115
40
108
14
2
6
19
3,994
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_scheduler_clean_queue.py
tests.test_scheduler_clean_queue.TestSchedulerCleanQueue
class TestSchedulerCleanQueue(AlignakTest): """ This class test the cleaning queue in scheduler """ def setUp(self): super(TestSchedulerCleanQueue, self).setUp() def test_clean_broks(self): """ Test clean broks in scheduler :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router host.event_handler_enabled = False svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") # To make tests quicker we make notifications send very quickly svc.notification_interval = 0.001 svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.event_handler_enabled = False # Define clean queue each time for the test # Set force the queues cleaning tick to be very high (no cleaning during the test) self._scheduler.pushed_conf.tick_clean_queues = 1000 self._scheduler.update_recurrent_works_tick({'tick_clean_queues': 1000}) self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 0, 'OK']]) time.sleep(0.1) broks_limit = 5 * (len(self._scheduler.hosts) + len(self._scheduler.services)) broks_limit += 1 print("Broks limit is %d broks" % (broks_limit)) assert broks_limit == 16 broks = [] for broker in list(self._scheduler.my_daemon.brokers.values()): print("Broker: %s has %d broks" % (broker, len(broker.broks))) for brok in broker.broks: broks.append(brok) assert len(broker.broks) < broks_limit # Limit is not yet reached... 9 broks raised! assert len(broks) < broks_limit for _ in range(0, 10): self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 1, 'WARNING']]) time.sleep(0.1) self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 0, 'OK']]) time.sleep(0.1) for broker in list(self._scheduler.my_daemon.brokers.values()): broks.extend(broker.broks) # Broker has too much broks! assert len(broker.broks) > broks_limit # Limit is reached! 
assert len(broks) > broks_limit # Change broks cleaning period to force cleaning self._scheduler.pushed_conf.tick_clean_queues = 1 self._scheduler.update_recurrent_works_tick({'tick_clean_queues': 1}) self.scheduler_loop(1) broks = [] for broker in list(self._scheduler.my_daemon.brokers.values()): print("Broker: %s has %d broks" % (broker, len(broker.broks))) broks.extend(broker.broks) assert len(broker.broks) < broks_limit # Limit is not yet reached... 9 broks raised! assert len(broks) < broks_limit def test_clean_checks(self): """ Test clean checks in scheduler :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router host.event_handler_enabled = False svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") # To make tests quicker we make notifications send very quickly svc.notification_interval = 0.001 svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults svc.event_handler_enabled = False # Define clean queue each time for the test # Set force the queues cleanning tick self._scheduler.pushed_conf.tick_clean_queues = 1 self._scheduler.update_recurrent_works_tick({'tick_clean_queues': 1}) self._scheduler.update_recurrent_works_tick({'tick_delete_zombie_checks': 1}) self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 0, 'OK']]) time.sleep(0.1) check_limit = 5 * (len(self._scheduler.hosts) + len(self._scheduler.services)) check_limit += 1 assert len(self._scheduler.checks) < check_limit for _ in range(0, (check_limit + 10)): host.next_chk = time.time() chk = host.launch_check(host.next_chk, self._scheduler.hosts, self._scheduler.services, self._scheduler.timeperiods, self._scheduler.macromodulations, self._scheduler.checkmodulations, self._scheduler.checks, force=False) self._scheduler.add_check(chk) time.sleep(0.1) assert len(self._scheduler.checks) 
> check_limit self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 1, 'WARNING']]) assert len(self._scheduler.checks) <= check_limit def test_clean_actions(self): """ Test clean actions in scheduler (like notifications) :return: None """ self.setup_with_file('cfg/cfg_default.cfg', dispatching=True) host = self._scheduler.hosts.find_by_name("test_host_0") host.checks_in_progress = [] host.act_depend_of = [] # ignore the router svc = self._scheduler.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0") # To make tests quicker we make notifications send very quickly svc.notification_interval = 0.001 svc.checks_in_progress = [] svc.act_depend_of = [] # no hostchecks on critical checkresults # Define clean queue each time for the test self._scheduler.pushed_conf.tick_clean_queues = 1000 self._scheduler.update_recurrent_works_tick({'tick_clean_queues': 1000}) self._scheduler.update_recurrent_works_tick({'tick_delete_zombie_actions': 1000}) self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 0, 'OK']]) time.sleep(0.1) action_limit = 5 * (len(self._scheduler.hosts) + len(self._scheduler.services)) action_limit += 1 assert len(self._scheduler.actions) < action_limit for _ in range(0, 10): self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 1, 'WARNING']]) time.sleep(0.1) self.scheduler_loop(1, [[host, 2, 'DOWN'], [svc, 0, 'OK']]) time.sleep(0.1) assert len(self._scheduler.actions) > action_limit # Set force the queues cleanning tick self._scheduler.pushed_conf.tick_clean_queues = 1 self._scheduler.update_recurrent_works_tick({'tick_clean_queues': 1}) self.scheduler_loop(1, [[host, 0, 'UP'], [svc, 1, 'WARNING']]) assert len(self._scheduler.actions) <= action_limit
class TestSchedulerCleanQueue(AlignakTest): ''' This class test the cleaning queue in scheduler ''' def setUp(self): pass def test_clean_broks(self): ''' Test clean broks in scheduler :return: None ''' pass def test_clean_checks(self): ''' Test clean checks in scheduler :return: None ''' pass def test_clean_actions(self): ''' Test clean actions in scheduler (like notifications) :return: None ''' pass
5
4
41
6
29
7
3
0.27
1
3
0
0
4
0
4
59
169
26
117
21
112
32
101
21
96
6
2
2
11
3,995
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_servicegroup.py
tests.test_servicegroup.TestServiceGroup
class TestServiceGroup(AlignakTest): """ This class tests the servicegroups """ def setUp(self): super(TestServiceGroup, self).setUp() def test_servicegroup(self): """ Default configuration service groups Default configuration has no loading problems ... as of it servicegroups are parsed correctly :return: None """ self.setup_with_file('cfg/cfg_default.cfg') assert self.conf_is_correct def test_look_for_alias(self): """ Services groups alias Default configuration has no loading problems ... as of it servicegroups are parsed correctly :return: None """ self.setup_with_file('cfg/servicegroup/alignak_groups_with_no_alias.cfg') assert self.conf_is_correct #  Found a servicegroup named NOALIAS sg = self._arbiter.conf.servicegroups.find_by_name("NOALIAS") assert isinstance(sg, Servicegroup) assert sg.get_name() == "NOALIAS" assert sg.alias == "" def test_servicegroup_members(self): """ Test if members are linked from group :return: None """ self.setup_with_file('cfg/servicegroup/alignak_servicegroup_members.cfg') assert self.conf_is_correct #  Found a servicegroup named allhosts_and_groups sg = self._arbiter.conf.servicegroups.find_by_name("allservices_and_groups") assert isinstance(sg, Servicegroup) assert sg.get_name() == "allservices_and_groups" assert len(self._arbiter.conf.servicegroups.get_members_of_group("allservices_and_groups")) == 1 assert len(sg.get_services()) == 1 assert len(sg.get_servicegroup_members()) == 4 def test_members_servicegroup(self): """ Test if group is linked from the member :return: None """ self.setup_with_file('cfg/servicegroup/alignak_servicegroup_members.cfg') assert self.conf_is_correct #  Found a servicegroup named allhosts_and_groups sg = self._arbiter.conf.servicegroups.find_by_name("allservices_and_groups") assert isinstance(sg, Servicegroup) assert sg.get_name() == "allservices_and_groups" assert len(self._arbiter.conf.servicegroups.get_members_of_group("allservices_and_groups")) == 1 assert len(sg.get_services()) == 1 print("List 
servicegroup services:") for service_id in sg.members: service = self._arbiter.conf.services[service_id] print(("Service: %s" % service)) assert isinstance(service, Service) if service.get_name() == 'test_ok_0': assert len(service.get_servicegroups()) == 4 for group_id in service.servicegroups: group = self._arbiter.conf.servicegroups[group_id] print(("Group: %s" % group)) assert group.get_name() in [ 'ok', 'servicegroup_01', 'servicegroup_02', 'allservices_and_groups' ] assert len(sg.get_servicegroup_members()) == 4 print("List servicegroup groups:") for group in sg.get_servicegroup_members(): print(("Group: %s" % group)) assert group in [ 'servicegroup_01', 'servicegroup_02', 'servicegroup_03', 'servicegroup_04' ] def test_servicegroup_with_no_service(self): """ Allow servicegroups with no services :return: None """ self.setup_with_file('cfg/servicegroup/alignak_servicegroup_no_service.cfg') assert self.conf_is_correct # Found a servicegroup named void sg = self._arbiter.conf.servicegroups.find_by_name("void") assert isinstance(sg, Servicegroup) assert sg.get_name() == "void" assert len(self._arbiter.conf.servicegroups.get_members_of_group("void")) == 0 print("Services: %s" % sg.get_servicegroup_members()) assert len(sg.get_servicegroup_members()) == 0 print("Services: %s" % sg.get_services()) assert len(sg.get_services()) == 0 def test_servicegroup_with_space(self): """ Test that servicegroups can have a name with spaces :return: None """ self.setup_with_file('cfg/cfg_default.cfg') assert self.conf_is_correct self.nb_servicegroups = len(self._arbiter.conf.servicegroups) self.setup_with_file('cfg/servicegroup/alignak_servicegroup_with_space.cfg') assert self.conf_is_correct # Two more groups than the default configuration assert len(self._arbiter.conf.servicegroups) == self.nb_servicegroups + 2 assert self._arbiter.conf.servicegroups.find_by_name("test_With Spaces").get_name() == \ "test_With Spaces" assert self._arbiter.conf.servicegroups.get_members_of_group( 
"test_With Spaces") is not [] assert self._arbiter.conf.servicegroups.find_by_name("test_With another Spaces").get_name() == \ "test_With another Spaces" assert self._arbiter.conf.servicegroups.get_members_of_group( "test_With another Spaces") is not [] def test_servicegroups_generated(self): """ Test that servicegroups can be built from service definition :return: None """ self.setup_with_file('cfg/servicegroup/alignak_servicegroups_generated.cfg') assert self.conf_is_correct self.nb_servicegroups = len(self._arbiter.conf.servicegroups) sgs = [] for name in ["MYSVCGP", "MYSVCGP2", "MYSVCGP3", "MYSVCGP4"]: sg = self._arbiter.conf.servicegroups.find_by_name(name) assert sg is not None sgs.append(sg) svc3 = self._arbiter.conf.services.find_srv_by_name_and_hostname("fake host", "fake svc3") svc4 = self._arbiter.conf.services.find_srv_by_name_and_hostname("fake host", "fake svc4") assert svc3.uuid in sgs[0].members assert svc3.uuid in sgs[1].members assert svc4.uuid in sgs[2].members assert svc4.uuid in sgs[3].members assert sgs[0].uuid in svc3.servicegroups assert sgs[1].uuid in svc3.servicegroups assert sgs[2].uuid in svc4.servicegroups assert sgs[3].uuid in svc4.servicegroups
class TestServiceGroup(AlignakTest): ''' This class tests the servicegroups ''' def setUp(self): pass def test_servicegroup(self): ''' Default configuration service groups Default configuration has no loading problems ... as of it servicegroups are parsed correctly :return: None ''' pass def test_look_for_alias(self): ''' Services groups alias Default configuration has no loading problems ... as of it servicegroups are parsed correctly :return: None ''' pass def test_servicegroup_members(self): ''' Test if members are linked from group :return: None ''' pass def test_members_servicegroup(self): ''' Test if group is linked from the member :return: None ''' pass def test_servicegroup_with_no_service(self): ''' Allow servicegroups with no services :return: None ''' pass def test_servicegroup_with_space(self): ''' Test that servicegroups can have a name with spaces :return: None ''' pass def test_servicegroups_generated(self): ''' Test that servicegroups can be built from service definition :return: None ''' pass
9
8
19
4
12
4
2
0.34
1
1
0
0
8
1
8
63
162
35
95
23
86
32
87
23
78
5
2
3
13
3,996
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_setup_new_conf.py
tests.test_setup_new_conf.TestSetupNewConf
class TestSetupNewConf(AlignakTest): """ This class will test load new conf for each modules (broker, scheduler...) """ def setUp(self): super(TestSetupNewConf, self).setUp() def test_several_loads(self): """ :return: """ for count in range(0, 5): perfdatas = [] my_process = psutil.Process() with my_process.oneshot(): perfdatas.append("cpu_percent=%.2f%%" % my_process.cpu_percent()) memory = my_process.memory_full_info() for key in memory._fields: perfdatas.append("mem_%s=%db" % (key, getattr(memory, key))) print("Process pid=%s, cpu/memory|%s" % (my_process.pid, " ".join(perfdatas))) self.test_conf_scheduler() def test_conf_scheduler(self): """ Test load new conf in scheduler :return: None """ self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) args = { 'env_file': self.env_filename, 'alignak_name': 'my-alignak', 'daemon_name': 'unset', } scheduler_daemon = schedulerdaemon(**args) # scheduler_daemon.load_modules_manager() scheduler_link = None for satellite in self._arbiter.dispatcher.schedulers: scheduler_link = satellite scheduler_daemon.new_conf = satellite.cfg break assert scheduler_link is not None # Simulate the daemons HTTP interface (very simple simulation !) 
with requests_mock.mock() as mockreq: for port in [7769, 7771, 7772]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) scheduler_daemon.setup_new_conf() assert 1 == len(scheduler_daemon.modules) assert scheduler_daemon.modules[0].module_alias == 'Example' assert scheduler_daemon.modules[0].option_1 == 'foo' assert scheduler_daemon.modules[0].option_2 == 'bar' assert scheduler_daemon.modules[0].option_3 == 'foobar' for host in scheduler_daemon.sched.pushed_conf.hosts: print("Host: %s" % host) # Two hosts declared in the configuration # One host provided by the Example module loaded in the arbiter assert 3 == len(scheduler_daemon.sched.pushed_conf.hosts) assert len(scheduler_daemon.pollers) == 1 assert len(scheduler_daemon.reactionners) == 1 assert len(scheduler_daemon.brokers) == 1 assert len(scheduler_daemon.schedulers) == 0 if scheduler_link.manage_arbiters: assert len(scheduler_daemon.arbiters) == 1 else: assert len(scheduler_daemon.arbiters) == 0 assert len(scheduler_daemon.receivers) == 0 # send new conf, so it's the second time. This to test the cleanup self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) scheduler_link = None for satellite in self._arbiter.dispatcher.schedulers: scheduler_link = satellite scheduler_daemon.new_conf = satellite.cfg break assert scheduler_link is not None # Simulate the daemons HTTP interface (very simple simulation !) 
with requests_mock.mock() as mockreq: for port in [7769, 7771, 7772]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) scheduler_daemon.setup_new_conf() assert 1 == len(scheduler_daemon.modules) assert scheduler_daemon.modules[0].module_alias == 'Example' assert scheduler_daemon.modules[0].option_1 == 'foo' assert scheduler_daemon.modules[0].option_2 == 'bar' assert scheduler_daemon.modules[0].option_3 == 'foobar' for host in scheduler_daemon.sched.pushed_conf.hosts: print(("Host: %s" % host)) # Two hosts declared in the configuration # One host provided by the Example module loaded in the arbiter assert 3 == len(scheduler_daemon.sched.pushed_conf.hosts) assert len(scheduler_daemon.pollers) == 1 assert len(scheduler_daemon.reactionners) == 1 assert len(scheduler_daemon.brokers) == 1 assert len(scheduler_daemon.schedulers) == 0 if scheduler_link.manage_arbiters: assert len(scheduler_daemon.arbiters) == 1 else: assert len(scheduler_daemon.arbiters) == 0 assert len(scheduler_daemon.receivers) == 0 # Stop launched modules scheduler_daemon.modules_manager.stop_all() def test_conf_receiver(self): """ Test load new conf in receiver :return: None """ self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) args = { 'env_file': self.env_filename, 'alignak_name': 'my-alignak', 'daemon_name': 'unset', } receiver = receiverdaemon(**args) # receiv.load_modules_manager() if hasattr(receiver, 'modules'): assert 0 == len(receiver.modules) for satellite in self._arbiter.dispatcher.satellites: if satellite.type == 'receiver': receiver.new_conf = satellite.cfg # Simulate the daemons HTTP interface (very simple simulation !) 
with requests_mock.mock() as mockreq: for port in [7768, 7770]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) receiver.setup_new_conf() self.show_logs() assert 1 == len(receiver.modules) assert receiver.modules[0].module_alias == 'Example' assert receiver.modules[0].option_3 == 'foobar' # check get hosts # Two hosts declared in the configuration # On host provided by the Example module loaded in the arbiter assert len(receiver.hosts_schedulers) == 3 assert len(receiver.schedulers) == 1 # send new conf, so it's the second time. This test the cleanup self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) for satellite in self._arbiter.dispatcher.satellites: if satellite.type == 'receiver': receiver.new_conf = satellite.cfg # Simulate the daemons HTTP interface (very simple simulation !) with requests_mock.mock() as mockreq: for port in [7768, 7770]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) receiver.setup_new_conf() assert len(receiver.schedulers) == 1 # Stop launched modules receiver.modules_manager.stop_all() def test_conf_poller(self): """ Test load new conf in poller :return: None """ self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) args = { 'env_file': self.env_filename, 'alignak_name': 'my-alignak', 'daemon_name': 'unset', } poller = pollerdaemon(**args) # poller.load_modules_manager() if hasattr(poller, 'modules'): assert 0 == len(poller.modules) for satellite in self._arbiter.dispatcher.satellites: if satellite.type == 'poller': poller.new_conf = satellite.cfg # Simulate the daemons HTTP interface (very simple simulation !) 
with requests_mock.mock() as mockreq: for port in [7768]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) poller.setup_new_conf() assert 1 == len(poller.modules) assert poller.modules[0].module_alias == 'Example' assert poller.modules[0].option_1 == 'foo' assert poller.modules[0].option_2 == 'bar' assert poller.modules[0].option_3 == 'foobar' assert len(poller.schedulers) == 1 # send new conf, so it's the second time. This test the cleanup self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) for satellite in self._arbiter.dispatcher.satellites: if satellite.type == 'poller': poller.new_conf = satellite.cfg # Simulate the daemons HTTP interface (very simple simulation !) with requests_mock.mock() as mockreq: for port in [7768]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) poller.setup_new_conf() assert len(poller.schedulers) == 1 # Stop launched modules poller.modules_manager.stop_all() def test_conf_broker(self): """ Test load new conf in broker :return: None """ self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) args = { 'env_file': self.env_filename, 'alignak_name': 'my-alignak', 'daemon_name': 'broker-master', } broker = brokerdaemon(**args) # broker.load_modules_manager() assert 1 == len(broker.modules) broker_link = None for satellite in self._arbiter.dispatcher.satellites: if satellite.name == 'broker-master': broker_link = satellite broker.new_conf = satellite.cfg break assert broker_link is not None # Simulate the daemons HTTP interface (very simple simulation !) 
with requests_mock.mock() as mockreq: for port in [7768, 7769, 7771, 7773]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) mockreq.get('http://127.0.0.1:7768/fill_initial_broks', json=[]) mockreq.get('http://127.0.0.1:7768/get_managed_configurations', json={}) broker.setup_new_conf() # Check modules received configuration assert 1 == len(broker.modules) print(("Modules: %s" % broker.modules)) print((" - : %s" % broker.modules[0].__dict__)) assert broker.modules[0].module_alias == 'Example' assert broker.modules[0].option_1 == 'foo' assert broker.modules[0].option_2 == 'bar' assert broker.modules[0].option_3 == 'foobar' assert len(broker.schedulers) == 1 if broker_link.manage_arbiters: assert len(broker.arbiters) == 1 else: assert len(broker.arbiters) == 0 assert len(broker.pollers) == 1 assert len(broker.reactionners) == 1 assert len(broker.receivers) == 1 # send new conf, so it's the second time. This tests the cleanup self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) broker_link = None for satellite in self._arbiter.dispatcher.satellites: if satellite.type == 'broker': broker_link = satellite broker.new_conf = satellite.cfg break assert broker_link is not None # Simulate the daemons HTTP interface (very simple simulation !) 
with requests_mock.mock() as mockreq: for port in [7768, 7769, 7771, 7773]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) mockreq.get('http://127.0.0.1:7768/fill_initial_broks', json=[]) mockreq.get('http://127.0.0.1:7768/get_managed_configurations', json={}) broker.setup_new_conf() assert len(broker.schedulers) == 1 if broker_link.manage_arbiters: assert len(broker.arbiters) == 1 else: assert len(broker.arbiters) == 0 assert len(broker.pollers) == 1 assert len(broker.reactionners) == 1 assert len(broker.receivers) == 1 # Stop launched modules broker.modules_manager.stop_all() def test_conf_reactionner(self): """ Test load new conf in reactionner :return: None """ self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) args = { 'env_file': self.env_filename, 'alignak_name': 'my-alignak', 'daemon_name': 'unset', } reactionner = reactionnerdaemon(**args) # reac.load_modules_manager() if hasattr(reactionner, 'modules'): assert 0 == len(reactionner.modules) for satellite in self._arbiter.dispatcher.satellites: if satellite.type == 'reactionner': reactionner.new_conf = satellite.cfg # Simulate the daemons HTTP interface (very simple simulation !) with requests_mock.mock() as mockreq: for port in [7768]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) # mockreq.get('http://127.0.0.1:7768/fill_initial_broks', json=[]) # mockreq.get('http://127.0.0.1:7768/get_managed_configurations', json={}) reactionner.setup_new_conf() assert 1 == len(reactionner.modules) assert reactionner.modules[0].module_alias == 'Example' assert reactionner.modules[0].option_1 == 'foo' assert reactionner.modules[0].option_2 == 'bar' assert reactionner.modules[0].option_3 == 'foobar' assert len(reactionner.schedulers) == 1 # send new conf, so it's the second time. 
This test the cleanup self.setup_with_file('cfg/cfg_default_with_modules.cfg', 'cfg/default_with_modules/alignak.ini', dispatching=True) for satellite in self._arbiter.dispatcher.satellites: if satellite.type == 'reactionner': reactionner.new_conf = satellite.cfg # Simulate the daemons HTTP interface (very simple simulation !) with requests_mock.mock() as mockreq: for port in [7768]: mockreq.get('http://127.0.0.1:%d/identity' % port, json={"start_time": 0, "running_id": 123456.123456}) reactionner.setup_new_conf() assert len(reactionner.schedulers) == 1 # Stop launched modules reactionner.modules_manager.stop_all()
class TestSetupNewConf(AlignakTest): ''' This class will test load new conf for each modules (broker, scheduler...) ''' def setUp(self): pass def test_several_loads(self): ''' :return: ''' pass def test_conf_scheduler(self): ''' Test load new conf in scheduler :return: None ''' pass def test_conf_receiver(self): ''' Test load new conf in receiver :return: None ''' pass def test_conf_poller(self): ''' Test load new conf in poller :return: None ''' pass def test_conf_broker(self): ''' Test load new conf in broker :return: None ''' pass def test_conf_reactionner(self): ''' Test load new conf in reactionner :return: None ''' pass
8
7
51
6
37
8
7
0.21
1
3
0
0
7
0
7
62
368
50
262
41
254
56
213
36
205
9
2
3
46
3,997
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_stats.py
tests.test_stats.TestStats
class TestStats(AlignakTest): """ This class test the stats """ def setUp(self): super(TestStats, self).setUp() def test_average_latency(self): """ Test average latency :return: None """ self.setup_with_file('cfg/cfg_stats.cfg', dispatching=True) svc0 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_0") svc1 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_1") svc2 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_2") svc3 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_3") svc4 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_4") svc5 = self._scheduler.services.find_srv_by_name_and_hostname( "test_host_0", "test_ok_5") self.scheduler_loop(1, [[svc0, 0, 'OK'], [svc1, 0, 'OK'], [svc2, 0, 'OK'], [svc3, 0, 'OK'], [svc4, 0, 'OK'], [svc5, 0, 'OK']]) now = time.time() svc0.latency = 0.96 svc1.latency = 0.884 svc2.latency = 0.92 svc3.latency = 1.3 svc4.latency = 0.95 svc5.latency = 0.78 svc0.last_chk = now-7 svc1.last_chk = now-1 svc2.last_chk = now svc3.last_chk = now-2 svc4.last_chk = now-5 svc5.last_chk = now-12 self._scheduler.get_latency_average_percentile() reference = { 'min': 0.89, 'max': 1.23, 'avg': 1.00, } assert reference['min'] == \ self._scheduler.stats['latency']['min'] assert reference['max'] == \ self._scheduler.stats['latency']['max'] assert reference['avg'] == \ self._scheduler.stats['latency']['avg']
class TestStats(AlignakTest): ''' This class test the stats ''' def setUp(self): pass def test_average_latency(self): ''' Test average latency :return: None ''' pass
3
2
28
5
22
2
1
0.13
1
1
0
0
2
0
2
57
61
10
45
11
42
6
30
11
27
1
2
0
2
3,998
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_statsd.py
tests.test_statsd.FakeCarbonServer
class FakeCarbonServer(threading.Thread): def __init__(self, port=0): super(FakeCarbonServer, self).__init__() self.setDaemon(True) self.port = port self.cli_socks = [] # will retain the client socks here sock = self.sock = socket.socket() sock.settimeout(1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(('127.0.0.1', port)) if not port: self.port = sock.getsockname()[1] sock.listen(0) self.running = True print("Starting fake carbon server on %d" % port) self.start() def stop(self): self.running = False self.sock.close() def run(self): while self.running: try: sock, addr = self.sock.accept() except socket.error as err: pass else: # so that we won't block indefinitely in handle_connection # in case the client doesn't send anything : sock.settimeout(3) self.cli_socks.append(sock) self.handle_connection(sock) self.cli_socks.remove(sock) def handle_connection(self, sock): data = sock.recv(4096) print(("Fake carbon received: %s", data)) sock.close()
class FakeCarbonServer(threading.Thread): def __init__(self, port=0): pass def stop(self): pass def run(self): pass def handle_connection(self, sock): pass
5
0
9
0
8
1
2
0.09
1
2
0
0
4
4
4
29
39
3
34
12
29
3
34
11
29
3
1
2
7
3,999
Alignak-monitoring/alignak
Alignak-monitoring_alignak/tests/test_statsd.py
tests.test_statsd.FakeStatsdServer
class FakeStatsdServer(threading.Thread): def __init__(self, port=0): super(FakeStatsdServer, self).__init__() self.setDaemon(True) self.port = port self.cli_socks = [] # will retain the client socks here sock = self.sock = socket.socket() sock.settimeout(1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(('127.0.0.1', port)) if not port: self.port = sock.getsockname()[1] sock.listen(0) self.running = True print("Starting fake StatsD server on %d" % port) self.start() def stop(self): self.running = False self.sock.close() def run(self): while self.running: try: sock, addr = self.sock.accept() except socket.error as err: pass else: # so that we won't block indefinitely in handle_connection # in case the client doesn't send anything : sock.settimeout(3) self.cli_socks.append(sock) self.handle_connection(sock) self.cli_socks.remove(sock) def handle_connection(self, sock): data = sock.recv(4096) print(("Fake StatsD received: %s", data)) sock.close()
class FakeStatsdServer(threading.Thread): def __init__(self, port=0): pass def stop(self): pass def run(self): pass def handle_connection(self, sock): pass
5
0
9
0
8
1
2
0.09
1
2
0
0
4
4
4
29
39
3
34
12
29
3
34
11
29
3
1
2
7