Dataset columns (name, dtype, min, max):

column                      dtype           min    max
id                          int64           0      843k
repository_name             stringlengths   7      55
file_path                   stringlengths   9      332
class_name                  stringlengths   3      290
human_written_code          stringlengths   12     4.36M
class_skeleton              stringlengths   19     2.2M
total_program_units         int64           1      9.57k
total_doc_str               int64           0      4.2k
AvgCountLine                float64         0      7.89k
AvgCountLineBlank           float64         0      300
AvgCountLineCode            float64         0      7.89k
AvgCountLineComment         float64         0      7.89k
AvgCyclomatic               float64         0      130
CommentToCodeRatio          float64         0      176
CountClassBase              float64         0      48
CountClassCoupled           float64         0      589
CountClassCoupledModified   float64         0      581
CountClassDerived           float64         0      5.37k
CountDeclInstanceMethod     float64         0      4.2k
CountDeclInstanceVariable   float64         0      299
CountDeclMethod             float64         0      4.2k
CountDeclMethodAll          float64         0      4.2k
CountLine                   float64         1      115k
CountLineBlank              float64         0      9.01k
CountLineCode               float64         0      94.4k
CountLineCodeDecl           float64         0      46.1k
CountLineCodeExe            float64         0      91.3k
CountLineComment            float64         0      27k
CountStmt                   float64         1      93.2k
CountStmtDecl               float64         0      46.1k
CountStmtExe                float64         0      90.2k
MaxCyclomatic               float64         0      759
MaxInheritanceTree          float64         0      16
MaxNesting                  float64         0      34
SumCyclomatic               float64         0      6k
id: 7,900
repository_name: AustralianSynchrotron/lightflow
file_path: AustralianSynchrotron_lightflow/lightflow/models/task_parameters.py
class_name: lightflow.models.task_parameters.TaskParameters
class TaskParameters(dict):
    """ A class to store a mix of callable and native data type parameters for tasks.

    A single parameter can either be a callable returning a native data type or the
    native data type itself. This allows tasks do dynamically change their parameters
    based on the data flowing into the task or data in the data_store.

    The structure of the callable has to be either:

        my_method(data, data_store)

    or

        lambda data, data_store:

    Tasks that implement parameters create an object of the class in their __init__()
    method and populate it with the tasks attributes. In their run() method tasks then
    have to call the eval(data, data_store) method in order to evaluate any callables.
    """
    def __init__(self, *args, **kwargs):
        """ Initialise the class by passing any arguments down to the dict base type. """
        super().__init__(*args, **kwargs)
        self.update(*args, **kwargs)

    def __getattr__(self, key):
        """ Return the parameter value for a key using attribute-style dot notation.

        Args:
            key (str): The key that points to the parameter value that should be
                       returned.

        Returns:
            str: The parameter value stored under the specified key.
        """
        if key in self:
            return self[key]
        else:
            raise AttributeError()

    def __setattr__(self, key, value):
        """ Assign a parameter value to a key using attribute-style dot notation.

        Args:
            key (str): The key to which the parameter value should be assigned.
            value: The parameter value that should be assigned to the key.
        """
        self[key] = value

    def __delattr__(self, key):
        """ Delete a parameter from the dictionary.

        Args:
            key (str): The key to the entry that should be deleted.

        Raise:
            AttributeError: if the key does not exist.
        """
        if key in self:
            del self[key]
        else:
            raise AttributeError()

    def eval(self, data, data_store, *, exclude=None):
        """ Return a new object in which callable parameters have been evaluated.

        Native types are not touched and simply returned, while callable methods are
        executed and their return value is returned.

        Args:
            data (MultiTaskData): The data object that has been passed from the
                                  predecessor task.
            data_store (DataStore): The persistent data store object that allows the
                                    task to store data for access across the current
                                    workflow run.
            exclude (list): List of key names as strings that should be excluded from
                            the evaluation.

        Returns:
            TaskParameters: A new TaskParameters object with the callable parameters
                            replaced by their return value.
        """
        exclude = [] if exclude is None else exclude

        result = {}
        for key, value in self.items():
            if key in exclude:
                continue

            if value is not None and callable(value):
                result[key] = value(data, data_store)
            else:
                result[key] = value
        return TaskParameters(result)

    def eval_single(self, key, data, data_store):
        """ Evaluate the value of a single parameter taking into account callables .

        Native types are not touched and simply returned, while callable methods are
        executed and their return value is returned.

        Args:
            key (str): The name of the parameter that should be evaluated.
            data (MultiTaskData): The data object that has been passed from the
                                  predecessor task.
            data_store (DataStore): The persistent data store object that allows the
                                    task to store data for access across the current
                                    workflow run.
        """
        if key in self:
            value = self[key]
            if value is not None and callable(value):
                return value(data, data_store)
            else:
                return value
        else:
            raise AttributeError()
class TaskParameters(dict): ''' A class to store a mix of callable and native data type parameters for tasks. A single parameter can either be a callable returning a native data type or the native data type itself. This allows tasks do dynamically change their parameters based on the data flowing into the task or data in the data_store. The structure of the callable has to be either: my_method(data, data_store) or lambda data, data_store: Tasks that implement parameters create an object of the class in their __init__() method and populate it with the tasks attributes. In their run() method tasks then have to call the eval(data, data_store) method in order to evaluate any callables. ''' def __init__(self, *args, **kwargs): ''' Initialise the class by passing any arguments down to the dict base type. ''' pass def __getattr__(self, key): ''' Return the parameter value for a key using attribute-style dot notation. Args: key (str): The key that points to the parameter value that should be returned. Returns: str: The parameter value stored under the specified key. ''' pass def __setattr__(self, key, value): ''' Assign a parameter value to a key using attribute-style dot notation. Args: key (str): The key to which the parameter value should be assigned. value: The parameter value that should be assigned to the key. ''' pass def __delattr__(self, key): ''' Delete a parameter from the dictionary. Args: key (str): The key to the entry that should be deleted. Raise: AttributeError: if the key does not exist. ''' pass def eval(self, data, data_store, *, exclude=None): ''' Return a new object in which callable parameters have been evaluated. Native types are not touched and simply returned, while callable methods are executed and their return value is returned. Args: data (MultiTaskData): The data object that has been passed from the predecessor task. data_store (DataStore): The persistent data store object that allows the task to store data for access across the current workflow run. exclude (list): List of key names as strings that should be excluded from the evaluation. Returns: TaskParameters: A new TaskParameters object with the callable parameters replaced by their return value. ''' pass def eval_single(self, key, data, data_store): ''' Evaluate the value of a single parameter taking into account callables . Native types are not touched and simply returned, while callable methods are executed and their return value is returned. Args: key (str): The name of the parameter that should be evaluated. data (MultiTaskData): The data object that has been passed from the predecessor task. data_store (DataStore): The persistent data store object that allows the task to store data for access across the current workflow run. ''' pass
Metrics (values paired, in order, with the metric columns in the schema above):
total_program_units: 7 | total_doc_str: 7
AvgCountLine: 15 | AvgCountLineBlank: 2 | AvgCountLineCode: 6 | AvgCountLineComment: 7 | AvgCyclomatic: 2
CommentToCodeRatio: 1.56
CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 0
CountDeclInstanceMethod: 6 | CountDeclInstanceVariable: 0 | CountDeclMethod: 6 | CountDeclMethodAll: 33
CountLine: 113 | CountLineBlank: 21 | CountLineCode: 36 | CountLineCodeDecl: 10 | CountLineCodeExe: 29 | CountLineComment: 56
CountStmt: 31 | CountStmtDecl: 10 | CountStmtExe: 24
MaxCyclomatic: 5 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 14
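A minimal usage sketch of the TaskParameters class shown in this record, assuming the lightflow package is importable. The data and data_store stand-ins below are hypothetical placeholders; eval() simply forwards whatever objects it is given to the callable parameters.

    from lightflow.models.task_parameters import TaskParameters

    # Hypothetical stand-ins for the MultiTaskData and DataStore objects that a
    # real task would receive; eval() only passes them through to the callables.
    data = {'threshold': 3}
    data_store = None

    params = TaskParameters(
        filename='out.txt',                                    # native value, returned as-is
        limit=lambda data, data_store: data['threshold'] * 2,  # callable, evaluated lazily
    )

    evaluated = params.eval(data, data_store)
    print(evaluated.filename)                              # -> 'out.txt'
    print(evaluated.limit)                                 # -> 6
    print(params.eval_single('limit', data, data_store))   # -> 6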
id: 7,901
repository_name: AustralianSynchrotron/lightflow
file_path: AustralianSynchrotron_lightflow/lightflow/models/task_signal.py
class_name: lightflow.models.task_signal.TaskSignal
class TaskSignal: """ Class to wrap the construction and sending of signals into easy to use methods.""" def __init__(self, client, dag_name): """ Initialise the task signal convenience class. Args: client (Client): A reference to a signal client object. dag_name (str): The name of the dag the task belongs to. """ self._client = client self._dag_name = dag_name def start_dag(self, dag, *, data=None): """ Schedule the execution of a dag by sending a signal to the workflow. Args: dag (Dag, str): The dag object or the name of the dag that should be started. data (MultiTaskData): The data that should be passed on to the new dag. Returns: str: The name of the successfully started dag. """ return self._client.send( Request( action='start_dag', payload={'name': dag.name if isinstance(dag, Dag) else dag, 'data': data if isinstance(data, MultiTaskData) else None} ) ).payload['dag_name'] def join_dags(self, names=None): """ Wait for the specified dags to terminate. This function blocks until the specified dags terminate. If no dags are specified wait for all dags of the workflow, except the dag of the task calling this signal, to terminate. Args: names (list): The names of the dags that have to terminate. Returns: bool: True if all the signal was sent successfully. """ return self._client.send( Request( action='join_dags', payload={'names': names} ) ).success def stop_dag(self, name=None): """ Send a stop signal to the specified dag or the dag that hosts this task. Args: name str: The name of the dag that should be stopped. If no name is given the dag that hosts this task is stopped. Upon receiving the stop signal, the dag will not queue any new tasks and wait for running tasks to terminate. Returns: bool: True if the signal was sent successfully. """ return self._client.send( Request( action='stop_dag', payload={'name': name if name is not None else self._dag_name} ) ).success def stop_workflow(self): """ Send a stop signal to the workflow. Upon receiving the stop signal, the workflow will not queue any new dags. Furthermore it will make the stop signal available to the dags, which will then stop queueing new tasks. As soon as all active tasks have finished processing, the workflow will terminate. Returns: bool: True if the signal was sent successfully. """ return self._client.send(Request(action='stop_workflow')).success @property def is_stopped(self): """ Check whether the task received a stop signal from the workflow. Tasks can use the stop flag to gracefully terminate their work. This is particularly important for long running tasks and tasks that employ an infinite loop, such as trigger tasks. Returns: bool: True if the task should be stopped. """ resp = self._client.send( Request( action='is_dag_stopped', payload={'dag_name': self._dag_name} ) ) return resp.payload['is_stopped']
class TaskSignal: ''' Class to wrap the construction and sending of signals into easy to use methods.''' def __init__(self, client, dag_name): ''' Initialise the task signal convenience class. Args: client (Client): A reference to a signal client object. dag_name (str): The name of the dag the task belongs to. ''' pass def start_dag(self, dag, *, data=None): ''' Schedule the execution of a dag by sending a signal to the workflow. Args: dag (Dag, str): The dag object or the name of the dag that should be started. data (MultiTaskData): The data that should be passed on to the new dag. Returns: str: The name of the successfully started dag. ''' pass def join_dags(self, names=None): ''' Wait for the specified dags to terminate. This function blocks until the specified dags terminate. If no dags are specified wait for all dags of the workflow, except the dag of the task calling this signal, to terminate. Args: names (list): The names of the dags that have to terminate. Returns: bool: True if all the signal was sent successfully. ''' pass def stop_dag(self, name=None): ''' Send a stop signal to the specified dag or the dag that hosts this task. Args: name str: The name of the dag that should be stopped. If no name is given the dag that hosts this task is stopped. Upon receiving the stop signal, the dag will not queue any new tasks and wait for running tasks to terminate. Returns: bool: True if the signal was sent successfully. ''' pass def stop_workflow(self): ''' Send a stop signal to the workflow. Upon receiving the stop signal, the workflow will not queue any new dags. Furthermore it will make the stop signal available to the dags, which will then stop queueing new tasks. As soon as all active tasks have finished processing, the workflow will terminate. Returns: bool: True if the signal was sent successfully. ''' pass @property def is_stopped(self): ''' Check whether the task received a stop signal from the workflow. Tasks can use the stop flag to gracefully terminate their work. This is particularly important for long running tasks and tasks that employ an infinite loop, such as trigger tasks. Returns: bool: True if the task should be stopped. ''' pass
Metrics:
total_program_units: 8 | total_doc_str: 7
AvgCountLine: 16 | AvgCountLineBlank: 2 | AvgCountLineCode: 6 | AvgCountLineComment: 8 | AvgCyclomatic: 2
CommentToCodeRatio: 1.24
CountClassBase: 0 | CountClassCoupled: 3 | CountClassCoupledModified: 3 | CountClassDerived: 0
CountDeclInstanceMethod: 6 | CountDeclInstanceVariable: 2 | CountDeclMethod: 6 | CountDeclMethodAll: 6
CountLine: 101 | CountLineBlank: 18 | CountLineCode: 37 | CountLineCodeDecl: 11 | CountLineCodeExe: 29 | CountLineComment: 46
CountStmt: 15 | CountStmtDecl: 10 | CountStmtExe: 8
MaxCyclomatic: 3 | MaxInheritanceTree: 0 | MaxNesting: 0 | SumCyclomatic: 9
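A hedged sketch of how a task callback might use the TaskSignal convenience methods documented in this record. The callback and the dag name 'my_other_dag' are hypothetical; the signal object itself is supplied by lightflow when the task runs, so the function is only defined here, not executed.

    from time import sleep

    def watch_and_trigger(data, store, signal, context):
        # Schedule another dag by name (hypothetical name), passing the current data on.
        signal.start_dag('my_other_dag', data=data)

        # Long-running tasks are expected to poll the stop flag and exit gracefully.
        while not signal.is_stopped:
            sleep(1.0)

        # Ask the workflow to wind down once this task decides it is done.
        signal.stop_workflow()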
id: 7,902
repository_name: AustralianSynchrotron/lightflow
file_path: AustralianSynchrotron_lightflow/lightflow/models/workflow.py
class_name: lightflow.models.workflow.Workflow
class Workflow: """ A workflow manages the execution and monitoring of dags. A workflow is a container for one or more dags. It is responsible for creating, starting and monitoring dags. It is also the central server for the signal system, handling the incoming requests from dags, tasks and the library API. The workflow class hosts the current stop flag for itself and a list of dags that should be stopped. Please note: this class has to be serialisable (e.g. by pickle) Args: queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. """ def __init__(self, queue=DefaultJobQueueName.Workflow, clear_data_store=True): self._queue = queue self._clear_data_store = clear_data_store self._dags_blueprint = {} self._dags_running = {} self._workflow_id = None self._name = None self._parameters = Parameters() self._provided_arguments = {} self._celery_app = None self._stop_workflow = False self._stop_dags = [] self._docstring = None @classmethod def from_name(cls, name, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, arguments=None): """ Create a workflow object from a workflow script. Args: name (str): The name of the workflow script. queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. arguments (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. Returns: Workflow: A fully initialised workflow object """ new_workflow = cls(queue=queue, clear_data_store=clear_data_store) new_workflow.load(name, arguments=arguments) return new_workflow @property def name(self): """ Returns the name of the workflow. """ return self._name @property def queue(self): """ Returns the name queue the workflow should be scheduled to. """ return self._queue @property def docstring(self): """ Returns the docstring of the workflow or None if empty. """ return self._docstring @property def parameters(self): """ Returns the workflow list of parameters. """ return self._parameters @property def provided_arguments(self): """ Returns the arguments provided to the workflow. """ return self._provided_arguments @property def is_stopped(self): """ Returns whether the workflow was stopped. """ return self._stop_workflow def load(self, name, *, arguments=None, validate_arguments=True, strict_dag=False): """ Import the workflow script and load all known objects. The workflow script is treated like a module and imported into the Python namespace. After the import, the method looks for instances of known classes and stores a reference for further use in the workflow object. Args: name (str): The name of the workflow script. arguments (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. validate_arguments (bool): Whether to check that all required arguments have been supplied. strict_dag (bool): If true then the loaded workflow module must contain an instance of Dag. Raises: WorkflowArgumentError: If the workflow requires arguments to be set that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails. 
""" arguments = {} if arguments is None else arguments try: workflow_module = importlib.import_module(name) dag_present = False # extract objects of specific types from the workflow module for key, obj in workflow_module.__dict__.items(): if isinstance(obj, Dag): self._dags_blueprint[obj.name] = obj dag_present = True elif isinstance(obj, Parameters): self._parameters.extend(obj) self._name = name self._docstring = inspect.getdoc(workflow_module) del sys.modules[name] if strict_dag and not dag_present: raise WorkflowImportError( 'Workflow does not include a dag {}'.format(name)) if validate_arguments: missing_parameters = self._parameters.check_missing(arguments) if len(missing_parameters) > 0: raise WorkflowArgumentError( 'The following parameters are required ' + 'by the workflow, but are missing: {}'.format( ', '.join(missing_parameters))) self._provided_arguments = arguments except (TypeError, ImportError): logger.error('Cannot import workflow {}'.format(name)) raise WorkflowImportError('Cannot import workflow {}'.format(name)) def run(self, config, data_store, signal_server, workflow_id): """ Run all autostart dags in the workflow. Only the dags that are flagged as autostart are started. Args: config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. data_store (DataStore): A DataStore object that is fully initialised and connected to the persistent data storage. signal_server (Server): A signal Server object that receives requests from dags and tasks. workflow_id (str): A unique workflow id that represents this workflow run """ self._workflow_id = workflow_id self._celery_app = create_app(config) # pre-fill the data store with supplied arguments args = self._parameters.consolidate(self._provided_arguments) for key, value in args.items(): data_store.get(self._workflow_id).set(key, value) # start all dags with the autostart flag set to True for name, dag in self._dags_blueprint.items(): if dag.autostart: self._queue_dag(name) # as long as there are dags in the list keep running while self._dags_running: if config.workflow_polling_time > 0.0: sleep(config.workflow_polling_time) # handle new requests from dags, tasks and the library (e.g. cli, web) for i in range(MAX_SIGNAL_REQUESTS): request = signal_server.receive() if request is None: break try: response = self._handle_request(request) if response is not None: signal_server.send(response) else: signal_server.restore(request) except (RequestActionUnknown, RequestFailed): signal_server.send(Response(success=False, uid=request.uid)) # remove any dags and their result data that finished running for name, dag in list(self._dags_running.items()): if dag.ready(): if self._celery_app.conf.result_expires == 0: dag.forget() del self._dags_running[name] elif dag.failed(): self._stop_workflow = True # remove the signal entry signal_server.clear() # delete all entries in the data_store under this workflow id, if requested if self._clear_data_store: data_store.remove(self._workflow_id) def _queue_dag(self, name, *, data=None): """ Add a new dag to the queue. If the stop workflow flag is set, no new dag can be queued. Args: name (str): The name of the dag that should be queued. data (MultiTaskData): The data that should be passed on to the new dag. Raises: DagNameUnknown: If the specified dag name does not exist Returns: str: The name of the queued dag. 
""" if self._stop_workflow: return None if name not in self._dags_blueprint: raise DagNameUnknown() new_dag = copy.deepcopy(self._dags_blueprint[name]) new_dag.workflow_name = self.name self._dags_running[new_dag.name] = self._celery_app.send_task( JobExecPath.Dag, args=(new_dag, self._workflow_id, data), queue=new_dag.queue, routing_key=new_dag.queue) return new_dag.name def _handle_request(self, request): """ Handle an incoming request by forwarding it to the appropriate method. Args: request (Request): Reference to a request object containing the incoming request. Raises: RequestActionUnknown: If the action specified in the request is not known. Returns: Response: A response object containing the response from the method handling the request. """ if request is None: return Response(success=False, uid=request.uid) action_map = { 'start_dag': self._handle_start_dag, 'stop_workflow': self._handle_stop_workflow, 'join_dags': self._handle_join_dags, 'stop_dag': self._handle_stop_dag, 'is_dag_stopped': self._handle_is_dag_stopped } if request.action in action_map: return action_map[request.action](request) else: raise RequestActionUnknown() def _handle_start_dag(self, request): """ The handler for the start_dag request. The start_dag request creates a new dag and adds it to the queue. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'name': the name of the dag that should be started 'data': the data that is passed onto the start tasks Returns: Response: A response object containing the following fields: - dag_name: The name of the started dag. """ dag_name = self._queue_dag(name=request.payload['name'], data=request.payload['data']) return Response(success=dag_name is not None, uid=request.uid, payload={'dag_name': dag_name}) def _handle_stop_workflow(self, request): """ The handler for the stop_workflow request. The stop_workflow request adds all running dags to the list of dags that should be stopped and prevents new dags from being started. The dags will then stop queueing new tasks, which will terminate the dags and in turn the workflow. Args: request (Request): Reference to a request object containing the incoming request. Returns: Response: A response object containing the following fields: - success: True if the dags were added successfully to the list of dags that should be stopped. """ self._stop_workflow = True for name, dag in self._dags_running.items(): if name not in self._stop_dags: self._stop_dags.append(name) return Response(success=True, uid=request.uid) def _handle_join_dags(self, request): """ The handler for the join_dags request. If dag names are given in the payload only return a valid Response if none of the dags specified by the names are running anymore. If no dag names are given, wait for all dags except one, which by design is the one that issued the request, to be finished. Args: request (Request): Reference to a request object containing the incoming request. Returns: Response: A response object containing the following fields: - success: True if all dags the request was waiting for have completed. """ if request.payload['names'] is None: send_response = len(self._dags_running) <= 1 else: send_response = all([name not in self._dags_running.keys() for name in request.payload['names']]) if send_response: return Response(success=True, uid=request.uid) else: return None def _handle_stop_dag(self, request): """ The handler for the stop_dag request. 
The stop_dag request adds a dag to the list of dags that should be stopped. The dag will then stop queueing new tasks and will eventually stop running. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'name': the name of the dag that should be stopped Returns: Response: A response object containing the following fields: - success: True if the dag was added successfully to the list of dags that should be stopped. """ if (request.payload['name'] is not None) and \ (request.payload['name'] not in self._stop_dags): self._stop_dags.append(request.payload['name']) return Response(success=True, uid=request.uid) def _handle_is_dag_stopped(self, request): """ The handler for the dag_stopped request. The dag_stopped request checks whether a dag is flagged to be terminated. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'dag_name': the name of the dag that should be checked Returns: Response: A response object containing the following fields: - is_stopped: True if the dag is flagged to be stopped. """ return Response(success=True, uid=request.uid, payload={ 'is_stopped': request.payload['dag_name'] in self._stop_dags })
class Workflow: ''' A workflow manages the execution and monitoring of dags. A workflow is a container for one or more dags. It is responsible for creating, starting and monitoring dags. It is also the central server for the signal system, handling the incoming requests from dags, tasks and the library API. The workflow class hosts the current stop flag for itself and a list of dags that should be stopped. Please note: this class has to be serialisable (e.g. by pickle) Args: queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. ''' def __init__(self, queue=DefaultJobQueueName.Workflow, clear_data_store=True): pass @classmethod def from_name(cls, name, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, arguments=None): ''' Create a workflow object from a workflow script. Args: name (str): The name of the workflow script. queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. arguments (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. Returns: Workflow: A fully initialised workflow object ''' pass @property def name(self): ''' Returns the name of the workflow. ''' pass @property def queue(self): ''' Returns the name queue the workflow should be scheduled to. ''' pass @property def docstring(self): ''' Returns the docstring of the workflow or None if empty. ''' pass @property def parameters(self): ''' Returns the workflow list of parameters. ''' pass @property def provided_arguments(self): ''' Returns the arguments provided to the workflow. ''' pass @property def is_stopped(self): ''' Returns whether the workflow was stopped. ''' pass def load(self, name, *, arguments=None, validate_arguments=True, strict_dag=False): ''' Import the workflow script and load all known objects. The workflow script is treated like a module and imported into the Python namespace. After the import, the method looks for instances of known classes and stores a reference for further use in the workflow object. Args: name (str): The name of the workflow script. arguments (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. validate_arguments (bool): Whether to check that all required arguments have been supplied. strict_dag (bool): If true then the loaded workflow module must contain an instance of Dag. Raises: WorkflowArgumentError: If the workflow requires arguments to be set that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails. ''' pass def run(self, config, data_store, signal_server, workflow_id): ''' Run all autostart dags in the workflow. Only the dags that are flagged as autostart are started. Args: config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. data_store (DataStore): A DataStore object that is fully initialised and connected to the persistent data storage. signal_server (Server): A signal Server object that receives requests from dags and tasks. workflow_id (str): A unique workflow id that represents this workflow run ''' pass def _queue_dag(self, name, *, data=None): ''' Add a new dag to the queue. If the stop workflow flag is set, no new dag can be queued. 
Args: name (str): The name of the dag that should be queued. data (MultiTaskData): The data that should be passed on to the new dag. Raises: DagNameUnknown: If the specified dag name does not exist Returns: str: The name of the queued dag. ''' pass def _handle_request(self, request): ''' Handle an incoming request by forwarding it to the appropriate method. Args: request (Request): Reference to a request object containing the incoming request. Raises: RequestActionUnknown: If the action specified in the request is not known. Returns: Response: A response object containing the response from the method handling the request. ''' pass def _handle_start_dag(self, request): ''' The handler for the start_dag request. The start_dag request creates a new dag and adds it to the queue. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'name': the name of the dag that should be started 'data': the data that is passed onto the start tasks Returns: Response: A response object containing the following fields: - dag_name: The name of the started dag. ''' pass def _handle_stop_workflow(self, request): ''' The handler for the stop_workflow request. The stop_workflow request adds all running dags to the list of dags that should be stopped and prevents new dags from being started. The dags will then stop queueing new tasks, which will terminate the dags and in turn the workflow. Args: request (Request): Reference to a request object containing the incoming request. Returns: Response: A response object containing the following fields: - success: True if the dags were added successfully to the list of dags that should be stopped. ''' pass def _handle_join_dags(self, request): ''' The handler for the join_dags request. If dag names are given in the payload only return a valid Response if none of the dags specified by the names are running anymore. If no dag names are given, wait for all dags except one, which by design is the one that issued the request, to be finished. Args: request (Request): Reference to a request object containing the incoming request. Returns: Response: A response object containing the following fields: - success: True if all dags the request was waiting for have completed. ''' pass def _handle_stop_dag(self, request): ''' The handler for the stop_dag request. The stop_dag request adds a dag to the list of dags that should be stopped. The dag will then stop queueing new tasks and will eventually stop running. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'name': the name of the dag that should be stopped Returns: Response: A response object containing the following fields: - success: True if the dag was added successfully to the list of dags that should be stopped. ''' pass def _handle_is_dag_stopped(self, request): ''' The handler for the dag_stopped request. The dag_stopped request checks whether a dag is flagged to be terminated. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'dag_name': the name of the dag that should be checked Returns: Response: A response object containing the following fields: - is_stopped: True if the dag is flagged to be stopped. ''' pass
Metrics:
total_program_units: 25 | total_doc_str: 17
AvgCountLine: 20 | AvgCountLineBlank: 3 | AvgCountLineCode: 9 | AvgCountLineComment: 8 | AvgCyclomatic: 3
CommentToCodeRatio: 0.95
CountClassBase: 0 | CountClassCoupled: 14 | CountClassCoupledModified: 10 | CountClassDerived: 0
CountDeclInstanceMethod: 16 | CountDeclInstanceVariable: 12 | CountDeclMethod: 17 | CountDeclMethodAll: 17
CountLine: 382 | CountLineBlank: 76 | CountLineCode: 157 | CountLineCodeDecl: 55 | CountLineCodeExe: 131 | CountLineComment: 149
CountStmt: 123 | CountStmtDecl: 46 | CountStmtExe: 105
MaxCyclomatic: 15 | MaxInheritanceTree: 0 | MaxNesting: 4 | SumCyclomatic: 48
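A small sketch, based only on the from_name() classmethod shown in this record, of how a workflow object might be created from a workflow script. The script name 'my_workflow' and the arguments are hypothetical, and actually executing the workflow would additionally need the config, data store and signal server objects passed to run().

    from lightflow.models.workflow import Workflow

    # 'my_workflow' is a hypothetical module name for a workflow script on the Python path;
    # load() will raise WorkflowImportError if no such module can be imported.
    wf = Workflow.from_name(
        'my_workflow',
        clear_data_store=True,
        arguments={'threshold': 3},   # ingested into the data store before execution
    )

    print(wf.name)        # -> 'my_workflow'
    print(wf.docstring)   # module docstring of the workflow script, or None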
id: 7,903
repository_name: AustralianSynchrotron/lightflow
file_path: AustralianSynchrotron_lightflow/lightflow/queue/const.py
class_name: lightflow.queue.const.DefaultJobQueueName
class DefaultJobQueueName:
    Workflow = 'workflow'
    Dag = 'dag'
    Task = 'task'
class DefaultJobQueueName: pass
Metrics:
total_program_units: 1 | total_doc_str: 0
AvgCountLine: 0 | AvgCountLineBlank: 0 | AvgCountLineCode: 0 | AvgCountLineComment: 0 | AvgCyclomatic: 0
CommentToCodeRatio: 0
CountClassBase: 0 | CountClassCoupled: 0 | CountClassCoupledModified: 0 | CountClassDerived: 0
CountDeclInstanceMethod: 0 | CountDeclInstanceVariable: 0 | CountDeclMethod: 0 | CountDeclMethodAll: 0
CountLine: 4 | CountLineBlank: 0 | CountLineCode: 4 | CountLineCodeDecl: 4 | CountLineCodeExe: 3 | CountLineComment: 0
CountStmt: 4 | CountStmtDecl: 4 | CountStmtExe: 3
MaxCyclomatic: 0 | MaxInheritanceTree: 0 | MaxNesting: 0 | SumCyclomatic: 0
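The class in this record only carries the default queue-name constants; a brief sketch of their values, which match the default queue arguments of the Workflow, BashTask and PythonTask constructors appearing elsewhere in this section.

    from lightflow.queue.const import DefaultJobQueueName

    print(DefaultJobQueueName.Workflow)  # -> 'workflow'
    print(DefaultJobQueueName.Dag)       # -> 'dag'
    print(DefaultJobQueueName.Task)      # -> 'task', default for task-level queue arguments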
id: 7,904
repository_name: AustralianSynchrotron/lightflow
file_path: AustralianSynchrotron_lightflow/lightflow/tasks/bash_task.py
class_name: lightflow.tasks.bash_task.BashTask
class BashTask(BaseTask): """ The Bash task executes a user-defined bash command or bash file. All task parameters except the name, callbacks, queue, force_run and propagate_skip can either be their native type or a callable returning the native type. Args: name (str): The name of the task. command (function, str): The command or bash file that should be executed. cwd (function, str, None): The working directory for the command. env (function, dict, None): A dictionary of environment variables. user (function, int, None): The user ID of the user with which the command should be executed. group (function, int, None): The group ID of the group with which the command should be executed. stdin (function, str, None): An input string that should be passed on to the process. refresh_time (function, float): The time in seconds the internal output handling waits before checking for new output from the process. capture_stdout (function, bool): Set to ``True`` to capture all standard output in a temporary file. capture_stderr (function, bool): Set to ``True`` to capture all standard errors in a temporary file. callback_process (callable): A callable that is called after the process started. The definition is:: (pid, data, store, signal, context) -> None with the parameters: - **pid** (*int*): The process PID. - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_end (callable): A callable that is called after the process completed. The definition is:: (returncode, stdout_file, stderr_file, data, store, signal, context) -> None with the parameters: - **returncode** (*int*): The return code of the process. - **stdout_file**: A file object with the standard output\ if the flag ``capture_stdout`` was set to ``True``,\ otherwise ``None``. - **stderr_file**: A file object with the error output\ if the flag ``capture_stderr`` was set to ``True`` otherwise ``None.`` - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_stdout (callable): A callable that is called for every line of output the process sends to stdout. The definition is:: (line, data, store, signal, context) -> None with the parameters: - **line** (*str*): Single line of the process output as a string. - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. 
callback_stderr (callable): A callable that is called for every line of output the process sends to stderr. The definition is:: (line, data, store, signal, context) -> None with the parameters: - **line** (*str*): Single line of the process output as a string. - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. queue (str): Name of the queue the task should be scheduled to. Defaults to the general task queue. callback_init (callable): An optional callable that is called shortly before the task is run. The definition is:: (data, store, signal, context) -> None with the parameters: - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_finally (callable): An optional callable that is always called at the end of a task, regardless whether it completed successfully, was stopped or was aborted. The definition is:: (status, data, store, signal, context) -> None with the parameters: - **status** (*TaskStatus*): The current status of the task. It can\ be one of the following: - ``TaskStatus.Success`` -- task was successful - ``TaskStatus.Stopped`` -- task was stopped - ``TaskStatus.Aborted`` -- task was aborted - ``TaskStatus.Error`` -- task raised an exception - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. force_run (bool): Run the task even if it is flagged to be skipped. propagate_skip (bool): Propagate the skip flag to the next task. """ def __init__(self, name, command, cwd=None, env=None, user=None, group=None, stdin=None, refresh_time=0.1, capture_stdout=False, capture_stderr=False, callback_process=None, callback_end=None, callback_stdout=None, callback_stderr=None, *, queue=DefaultJobQueueName.Task, callback_init=None, callback_finally=None, force_run=False, propagate_skip=True): super().__init__(name, queue=queue, callback_init=callback_init, callback_finally=callback_finally, force_run=force_run, propagate_skip=propagate_skip) self.params = TaskParameters( command=command, cwd=cwd, env=env, user=user, group=group, stdin=stdin, refresh_time=refresh_time, capture_stdout=capture_stdout, capture_stderr=capture_stderr ) self._callback_process = callback_process self._callback_end = callback_end self._callback_stdout = callback_stdout self._callback_stderr = callback_stderr def run(self, data, store, signal, context, **kwargs): """ The main run method of the Python task. 
Args: data (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. store (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. Returns: Action (Action): An Action object containing the data that should be passed on to the next task and optionally a list of successor tasks that should be executed. """ params = self.params.eval(data, store, exclude=['command']) capture_stdout = self._callback_stdout is not None or params.capture_stdout capture_stderr = self._callback_stderr is not None or params.capture_stderr stdout_file = TemporaryFile() if params.capture_stdout else None stderr_file = TemporaryFile() if params.capture_stderr else None stdout = PIPE if capture_stdout else None stderr = PIPE if capture_stderr else None # change the user or group under which the process should run if params.user is not None or params.group is not None: pre_exec = self._run_as(params.user, params.group) else: pre_exec = None # call the command proc = Popen(self.params.eval_single('command', data, store), cwd=params.cwd, shell=True, env=params.env, preexec_fn=pre_exec, stdout=stdout, stderr=stderr, stdin=PIPE if params.stdin is not None else None) # if input is available, send it to the process if params.stdin is not None: proc.stdin.write(params.stdin.encode(sys.getfilesystemencoding())) # send a notification that the process has been started try: if self._callback_process is not None: self._callback_process(proc.pid, data, store, signal, context) except (StopTask, AbortWorkflow): proc.terminate() raise # send the output handling to a thread if capture_stdout or capture_stderr: output_reader = BashTaskOutputReader(proc, stdout_file, stderr_file, self._callback_stdout, self._callback_stderr, params.refresh_time, data, store, signal, context) output_reader.start() else: output_reader = None # wait for the process to complete and watch for a stop signal while proc.poll() is None or\ (output_reader is not None and output_reader.is_alive()): sleep(params.refresh_time) if signal.is_stopped: proc.terminate() if output_reader is not None: output_reader.join() data = output_reader.data # if a stop or abort exception was raised, stop the bash process and re-raise if output_reader.exc_obj is not None: if proc.poll() is None: proc.terminate() raise output_reader.exc_obj # send a notification that the process has completed if self._callback_end is not None: if stdout_file is not None: stdout_file.seek(0) if stderr_file is not None: stderr_file.seek(0) self._callback_end(proc.returncode, stdout_file, stderr_file, data, store, signal, context) if stdout_file is not None: stdout_file.close() if stderr_file is not None: stderr_file.close() return Action(data) @staticmethod def _run_as(user, group): """ Function wrapper that sets the user and group for the process """ def wrapper(): if user is not None: os.setuid(user) if group is not None: os.setgid(group) return wrapper
class BashTask(BaseTask): ''' The Bash task executes a user-defined bash command or bash file. All task parameters except the name, callbacks, queue, force_run and propagate_skip can either be their native type or a callable returning the native type. Args: name (str): The name of the task. command (function, str): The command or bash file that should be executed. cwd (function, str, None): The working directory for the command. env (function, dict, None): A dictionary of environment variables. user (function, int, None): The user ID of the user with which the command should be executed. group (function, int, None): The group ID of the group with which the command should be executed. stdin (function, str, None): An input string that should be passed on to the process. refresh_time (function, float): The time in seconds the internal output handling waits before checking for new output from the process. capture_stdout (function, bool): Set to ``True`` to capture all standard output in a temporary file. capture_stderr (function, bool): Set to ``True`` to capture all standard errors in a temporary file. callback_process (callable): A callable that is called after the process started. The definition is:: (pid, data, store, signal, context) -> None with the parameters: - **pid** (*int*): The process PID. - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_end (callable): A callable that is called after the process completed. The definition is:: (returncode, stdout_file, stderr_file, data, store, signal, context) -> None with the parameters: - **returncode** (*int*): The return code of the process. - **stdout_file**: A file object with the standard output if the flag ``capture_stdout`` was set to ``True``, otherwise ``None``. - **stderr_file**: A file object with the error output if the flag ``capture_stderr`` was set to ``True`` otherwise ``None.`` - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_stdout (callable): A callable that is called for every line of output the process sends to stdout. The definition is:: (line, data, store, signal, context) -> None with the parameters: - **line** (*str*): Single line of the process output as a string. - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. 
callback_stderr (callable): A callable that is called for every line of output the process sends to stderr. The definition is:: (line, data, store, signal, context) -> None with the parameters: - **line** (*str*): Single line of the process output as a string. - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. queue (str): Name of the queue the task should be scheduled to. Defaults to the general task queue. callback_init (callable): An optional callable that is called shortly before the task is run. The definition is:: (data, store, signal, context) -> None with the parameters: - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_finally (callable): An optional callable that is always called at the end of a task, regardless whether it completed successfully, was stopped or was aborted. The definition is:: (status, data, store, signal, context) -> None with the parameters: - **status** (*TaskStatus*): The current status of the task. It can be one of the following: - ``TaskStatus.Success`` -- task was successful - ``TaskStatus.Stopped`` -- task was stopped - ``TaskStatus.Aborted`` -- task was aborted - ``TaskStatus.Error`` -- task raised an exception - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. force_run (bool): Run the task even if it is flagged to be skipped. propagate_skip (bool): Propagate the skip flag to the next task. ''' def __init__(self, name, command, cwd=None, env=None, user=None, group=None, stdin=None, refresh_time=0.1, capture_stdout=False, capture_stderr=False, callback_process=None, callback_end=None, callback_stdout=None, callback_stderr=None, *, queue=DefaultJobQueueName.Task, callback_init=None, callback_finally=None, force_run=False, propagate_skip=True): pass def run(self, data, store, signal, context, **kwargs): ''' The main run method of the Python task. Args: data (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. store (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. 
Returns: Action (Action): An Action object containing the data that should be passed on to the next task and optionally a list of successor tasks that should be executed. ''' pass @staticmethod def _run_as(user, group): ''' Function wrapper that sets the user and group for the process ''' pass def wrapper(): pass
Metrics:
total_program_units: 6 | total_doc_str: 3
AvgCountLine: 34 | AvgCountLineBlank: 5 | AvgCountLineCode: 23 | AvgCountLineComment: 6 | AvgCyclomatic: 7
CommentToCodeRatio: 1.54
CountClassBase: 1 | CountClassCoupled: 8 | CountClassCoupledModified: 6 | CountClassDerived: 0
CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 3 | CountDeclMethodAll: 27
CountLine: 278 | CountLineBlank: 49 | CountLineCode: 90 | CountLineCodeDecl: 26 | CountLineCodeExe: 79 | CountLineComment: 139
CountStmt: 61 | CountStmtDecl: 20 | CountStmtExe: 56
MaxCyclomatic: 21 | MaxInheritanceTree: 1 | MaxNesting: 3 | SumCyclomatic: 26
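An illustrative construction of the BashTask described in this record. The task name, command and callback are hypothetical, and the task would normally be wired into a Dag and executed by lightflow rather than run directly.

    from lightflow.tasks.bash_task import BashTask

    def print_line(line, data, store, signal, context):
        # Called for every line the process writes to stdout.
        print(line.rstrip())

    disk_usage = BashTask(
        name='disk_usage',
        command='df -h',       # may also be a callable returning the command string
        capture_stdout=True,
        callback_stdout=print_line,
    )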
id: 7,905
repository_name: AustralianSynchrotron/lightflow
file_path: AustralianSynchrotron_lightflow/lightflow/tasks/bash_task.py
class_name: lightflow.tasks.bash_task.BashTaskOutputReader
class BashTaskOutputReader(Thread): """ Helper class to read the output of the process. """ def __init__(self, process, stdout_file, stderr_file, callback_stdout, callback_stderr, refresh_time, data, store, signal, context): """ Initializes the reader for the process output. Args: process: Reference to a Popen object representing the running process. stdout_file: The file object for the standard output of the process. stderr_file: The file object for the standard error of the process. callback_stdout: The callback that should be called for every line of the standard output. callback_stderr: The callback that should be called for every line of the standard error. refresh_time (float): The time in seconds before checking for new output from the process. data (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. store (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. """ super().__init__() self._process = process self._stdout_file = stdout_file self._stderr_file = stderr_file self._callback_stdout = callback_stdout self._callback_stderr = callback_stderr self._refresh_time = refresh_time self._data = data self._store = store self._signal = signal self._context = context self._exc_obj = None @property def data(self): return self._data @property def exc_obj(self): return self._exc_obj def run(self): """ Drain the process output streams. """ read_stdout = partial(self._read_output, stream=self._process.stdout, callback=self._callback_stdout, output_file=self._stdout_file) read_stderr = partial(self._read_output, stream=self._process.stderr, callback=self._callback_stderr, output_file=self._stderr_file) # capture the process output as long as the process is active try: while self._process.poll() is None: result_stdout = read_stdout() result_stderr = read_stderr() if not result_stdout and not result_stderr: sleep(self._refresh_time) # read remaining lines while read_stdout(): pass while read_stderr(): pass except (StopTask, AbortWorkflow) as exc: self._exc_obj = exc def _read_output(self, stream, callback, output_file): """ Read the output of the process, executed the callback and save the output. Args: stream: A file object pointing to the output stream that should be read. callback(callable, None): A callback function that is called for each new line of output. output_file: A file object to which the full output is written. Returns: bool: True if a line was read from the output, otherwise False. """ if (callback is None and output_file is None) or stream.closed: return False line = stream.readline() if line: if callback is not None: callback(line.decode(), self._data, self._store, self._signal, self._context) if output_file is not None: output_file.write(line) return True else: return False
class BashTaskOutputReader(Thread): ''' Helper class to read the output of the process. ''' def __init__(self, process, stdout_file, stderr_file, callback_stdout, callback_stderr, refresh_time, data, store, signal, context): ''' Initializes the reader for the process output. Args: process: Reference to a Popen object representing the running process. stdout_file: The file object for the standard output of the process. stderr_file: The file object for the standard error of the process. callback_stdout: The callback that should be called for every line of the standard output. callback_stderr: The callback that should be called for every line of the standard error. refresh_time (float): The time in seconds before checking for new output from the process. data (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. store (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. ''' pass @property def data(self): pass @property def exc_obj(self): pass def run(self): ''' Drain the process output streams. ''' pass def _read_output(self, stream, callback, output_file): ''' Read the output of the process, executed the callback and save the output. Args: stream: A file object pointing to the output stream that should be read. callback(callable, None): A callback function that is called for each new line of output. output_file: A file object to which the full output is written. Returns: bool: True if a line was read from the output, otherwise False. ''' pass
Metrics:
total_program_units: 8 | total_doc_str: 4
AvgCountLine: 19 | AvgCountLineBlank: 3 | AvgCountLineCode: 10 | AvgCountLineComment: 6 | AvgCyclomatic: 3
CommentToCodeRatio: 0.59
CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 2 | CountClassDerived: 0
CountDeclInstanceMethod: 5 | CountDeclInstanceVariable: 11 | CountDeclMethod: 5 | CountDeclMethodAll: 30
CountLine: 103 | CountLineBlank: 17 | CountLineCode: 54 | CountLineCodeDecl: 27 | CountLineCodeExe: 44 | CountLineComment: 32
CountStmt: 44 | CountStmtDecl: 22 | CountStmtExe: 38
MaxCyclomatic: 6 | MaxInheritanceTree: 1 | MaxNesting: 3 | SumCyclomatic: 14
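A standalone sketch of the helper class in this record draining a real subprocess, assuming lightflow is importable. The None placeholders stand in for the data, store, signal and context objects a real BashTask would provide; the reader simply forwards them to the callback.

    from subprocess import Popen, PIPE
    from lightflow.tasks.bash_task import BashTaskOutputReader

    def echo_line(line, data, store, signal, context):
        print('stdout:', line.rstrip())

    proc = Popen('echo hello; echo world', shell=True, stdout=PIPE, stderr=PIPE)
    reader = BashTaskOutputReader(
        process=proc,
        stdout_file=None, stderr_file=None,
        callback_stdout=echo_line, callback_stderr=None,
        refresh_time=0.1,
        data=None, store=None, signal=None, context=None,  # placeholders, passed through
    )
    reader.start()
    reader.join()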
id: 7,906
repository_name: AustralianSynchrotron/lightflow
file_path: AustralianSynchrotron_lightflow/lightflow/tasks/python_task.py
class_name: lightflow.tasks.python_task.PythonTask
class PythonTask(BaseTask): """ The Python task executes a user-defined python method. Args: name (str): The name of the task. callback (callable): A reference to the Python method that should be called by the task as soon as it is run. It has to have the following definition:: (data, store, signal, context) -> None, Action with the parameters: - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. queue (str): Name of the queue the task should be scheduled to. Defaults to the general task queue. callback_init (callable): An optional callable that is called shortly before the task is run. The definition is:: (data, store, signal, context) -> None with the parameters: - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_finally (callable): An optional callable that is always called at the end of a task, regardless whether it completed successfully, was stopped or was aborted. The definition is:: (status, data, store, signal, context) -> None with the parameters: - **status** (*TaskStatus*): The current status of the task. It can\ be one of the following: - ``TaskStatus.Success`` -- task was successful - ``TaskStatus.Stopped`` -- task was stopped - ``TaskStatus.Aborted`` -- task was aborted - ``TaskStatus.Error`` -- task raised an exception - **data** (:class:`.MultiTaskData`): The data object that has been passed\ from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object\ that allows the task to store data for access across the current\ workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps\ the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. force_run (bool): Run the task even if it is flagged to be skipped. propagate_skip (bool): Propagate the skip flag to the next task. """ def __init__(self, name, callback=None, *, queue=DefaultJobQueueName.Task, callback_init=None, callback_finally=None, force_run=False, propagate_skip=True): super().__init__(name, queue=queue, callback_init=callback_init, callback_finally=callback_finally, force_run=force_run, propagate_skip=propagate_skip) self._callback = callback def run(self, data, store, signal, context, **kwargs): """ The main run method of the Python task. Args: data (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. store (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. 
Returns: Action: An Action object containing the data that should be passed on to the next task and optionally a list of successor tasks that should be executed. """ if self._callback is not None: result = self._callback(data, store, signal, context, **kwargs) return result if result is not None else Action(data)
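A minimal usage sketch for the PythonTask class above, assuming the lightflow package is installed. The callback name print_message, the task name and the dag name are hypothetical; they only illustrate the documented (data, store, signal, context) callback signature and the fact that returning None forwards the incoming data as Action(data).

# Sketch only: assumes lightflow is installed; names below are illustrative.
from lightflow.models import Dag
from lightflow.tasks import PythonTask


def print_message(data, store, signal, context):
    # Follows the documented (data, store, signal, context) signature.
    # Returning None means the incoming data is forwarded via Action(data).
    print('print_task was executed')


print_task = PythonTask(name='print_task',
                        callback=print_message)

# A single-node dag is enough to register the task.
d = Dag('example_dag')
d.define({print_task: None})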
class PythonTask(BaseTask): ''' The Python task executes a user-defined python method. Args: name (str): The name of the task. callback (callable): A reference to the Python method that should be called by the task as soon as it is run. It has to have the following definition:: (data, store, signal, context) -> None, Action with the parameters: - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. queue (str): Name of the queue the task should be scheduled to. Defaults to the general task queue. callback_init (callable): An optional callable that is called shortly before the task is run. The definition is:: (data, store, signal, context) -> None with the parameters: - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. callback_finally (callable): An optional callable that is always called at the end of a task, regardless whether it completed successfully, was stopped or was aborted. The definition is:: (status, data, store, signal, context) -> None with the parameters: - **status** (*TaskStatus*): The current status of the task. It can be one of the following: - ``TaskStatus.Success`` -- task was successful - ``TaskStatus.Stopped`` -- task was stopped - ``TaskStatus.Aborted`` -- task was aborted - ``TaskStatus.Error`` -- task raised an exception - **data** (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. - **store** (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. - **signal** (*TaskSignal*): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. - **context** (*TaskContext*): The context in which the tasks runs. force_run (bool): Run the task even if it is flagged to be skipped. propagate_skip (bool): Propagate the skip flag to the next task. ''' def __init__(self, name, callback=None, *, queue=DefaultJobQueueName.Task, callback_init=None, callback_finally=None, force_run=False, propagate_skip=True): pass def run(self, data, store, signal, context, **kwargs): ''' The main run method of the Python task. Args: data (:class:`.MultiTaskData`): The data object that has been passed from the predecessor task. store (:class:`.DataStoreDocument`): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. Returns: Action: An Action object containing the data that should be passed on to the next task and optionally a list of successor tasks that should be executed. ''' pass
3
2
14
1
6
7
2
5.42
1
3
2
0
2
1
2
26
95
18
12
7
7
65
8
5
5
3
1
1
4
7,907
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/tests/test_base_task.py
tests.test_base_task.CeleryResultMock
class CeleryResultMock:
    def __init__(self, *, state=None, ready=False, failed=False):
        self.state = state
        self._ready = ready
        self._failed = failed
        self._forget_called = False

    def ready(self):
        return self._ready

    def failed(self):
        return self._failed

    def forget(self):
        self._forget_called = True
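A short pytest-style sketch of how this mock might be used in place of a celery AsyncResult; the test name and assertions are illustrative and assume the CeleryResultMock class above is in scope.

# Illustrative only: the mock stands in for a celery AsyncResult in a unit test.
def test_celery_result_mock_reports_failure():
    result = CeleryResultMock(state='FAILURE', ready=True, failed=True)

    assert result.ready() is True
    assert result.failed() is True

    result.forget()
    assert result._forget_called is True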
class CeleryResultMock: def __init__(self, *, state=None, ready=False, failed=False): pass def ready(self): pass def failed(self): pass def forget(self): pass
5
0
3
0
3
0
1
0
0
0
0
0
4
4
4
4
15
3
12
9
7
0
12
9
7
1
0
0
4
7,908
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.QueueStats
class QueueStats:
    """ Represents the queue information returned from celery.

    Args:
        name (str): The name of the queue.
        routing_key (str): The routing key of the queue.
    """
    def __init__(self, name, routing_key):
        self.name = name
        self.routing_key = routing_key

    @classmethod
    def from_celery(cls, queue_dict):
        """ Create a QueueStats object from the dictionary returned by celery.

        Args:
            queue_dict (dict): The dictionary as returned by celery.

        Returns:
            QueueStats: A fully initialized QueueStats object.
        """
        return QueueStats(
            name=queue_dict['name'],
            routing_key=queue_dict['routing_key']
        )

    def to_dict(self):
        """ Return a dictionary of the queue stats.

        Returns:
            dict: Dictionary of the stats.
        """
        return {
            'name': self.name,
            'routing_key': self.routing_key
        }
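A small sketch of the intended round trip; the queue_dict values are invented and only need the two keys that from_celery reads. It assumes the QueueStats class above is in scope.

# Hypothetical celery queue dictionary; only 'name' and 'routing_key' are read.
queue_dict = {'name': 'task', 'routing_key': 'task'}

stats = QueueStats.from_celery(queue_dict)
assert stats.to_dict() == {'name': 'task', 'routing_key': 'task'}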
class QueueStats: ''' Represents the queue information returned from celery. Args: name (str): The name of the queue. routing_key (str): The routing key of the queue. ''' def __init__(self, name, routing_key): pass @classmethod def from_celery(cls, queue_dict): ''' Create a QueueStats object from the dictionary returned by celery. Args: queue_dict (dict): The dictionary as returned by celery. Returns: QueueStats: A fully initialized QueueStats object. ''' pass def to_dict(self): ''' Return a dictionary of the queue stats. Returns: dict: Dictionary of the stats. ''' pass
5
3
9
1
4
3
1
1
0
0
0
0
2
2
3
3
36
6
15
7
10
15
8
6
4
1
0
0
3
7,909
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.JobSucceededEvent
class JobSucceededEvent(JobEvent):
    """ This event is triggered when a job completed successfully. """
    def __init__(self, uuid, job_type, event_type, queue, hostname, pid,
                 name, workflow_id, event_time, duration):
        super().__init__(uuid, job_type, event_type, queue, hostname, pid,
                         name, workflow_id, event_time, duration)
class JobSucceededEvent(JobEvent): ''' This event is triggered when a job completed successfully. ''' def __init__(self, uuid, job_type, event_type, queue, hostname, pid, name, workflow_id, event_time, duration): pass
2
1
4
0
4
0
1
0.2
1
1
0
0
1
0
1
3
6
0
5
3
2
1
3
2
1
1
1
0
1
7,910
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.JobStoppedEvent
class JobStoppedEvent(JobEvent):
    """ This event is triggered when a job was stopped. """
    def __init__(self, uuid, job_type, event_type, queue, hostname, pid,
                 name, workflow_id, event_time, duration):
        super().__init__(uuid, job_type, event_type, queue, hostname, pid,
                         name, workflow_id, event_time, duration)
class JobStoppedEvent(JobEvent): ''' This event is triggered when a job was stopped. ''' def __init__(self, uuid, job_type, event_type, queue, hostname, pid, name, workflow_id, event_time, duration): pass
2
1
4
0
4
0
1
0.2
1
1
0
0
1
0
1
3
6
0
5
3
2
1
3
2
1
1
1
0
1
7,911
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/mongo_proxy.py
lightflow.models.mongo_proxy.MongoClientProxy
class MongoClientProxy(MongoReconnectProxy):
    """ Proxy for catching AutoReconnect exceptions in function calls of the MongoClient

    Specialization of the MongoReconnectProxy class for the MongoClient class.
    """
    def __init__(self, obj):
        super().__init__(obj,
                         get_methods(pymongo.collection.Collection,
                                     pymongo.database.Database,
                                     MongoClient,
                                     pymongo))
class MongoClientProxy(MongoReconnectProxy): ''' Proxy for catching AutoReconnect exceptions in function calls of the MongoClient Specialization of the MongoReconnectProxy class for the MongoClient class. ''' def __init__(self, obj): pass
2
1
6
0
6
0
1
0.43
1
1
0
0
1
0
1
9
11
1
7
2
5
3
3
2
1
1
1
0
1
7,912
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.JobStats
class JobStats: """ Represents the job (=celery task) information returned from celery. Args: name (str): The name of the job. job_id (str): The internal ID of the job. job_type (str): The type of the job (workflow, dag, task). queue (str): The name of the queue the job was scheduled to. workflow_id (str): The id of the workflow that started this job. start_time (datetime): The time the job was started in UTC. arguments (dict): The provided arguments to a workflow. acknowledged (bool): True of the job was acknowledged by the message system. func_name (str): The name of the function that represents this job. hostname (str): The name of the host this job runs on. worker_name (str): The name of the worker this job runs on. worker_pid (int): The pid of the process this jobs runs on. routing_key (str): The routing key for this job. """ def __init__(self, name, job_id, job_type, queue, workflow_id, start_time, arguments, acknowledged, func_name, hostname, worker_name, worker_pid, routing_key): self.name = name self.id = job_id self.type = job_type self.workflow_id = workflow_id self.queue = queue self.start_time = start_time self.arguments = arguments self.acknowledged = acknowledged self.func_name = func_name self.hostname = hostname self.worker_name = worker_name self.worker_pid = worker_pid self.routing_key = routing_key @classmethod def from_celery(cls, worker_name, job_dict, celery_app): """ Create a JobStats object from the dictionary returned by celery. Args: worker_name (str): The name of the worker this jobs runs on. job_dict (dict): The dictionary as returned by celery. celery_app: Reference to a celery application object. Returns: JobStats: A fully initialized JobStats object. """ if not isinstance(job_dict, dict) or 'id' not in job_dict: raise JobStatInvalid('The job description is missing important fields.') async_result = AsyncResult(id=job_dict['id'], app=celery_app) a_info = async_result.info if isinstance(async_result.info, dict) else None return JobStats( name=a_info.get('name', '') if a_info is not None else '', job_id=job_dict['id'], job_type=a_info.get('type', '') if a_info is not None else '', workflow_id=a_info.get('workflow_id', '') if a_info is not None else '', queue=a_info.get('queue', '') if a_info is not None else '', start_time=a_info.get('start_time', None) if a_info is not None else None, arguments=a_info.get('arguments', {}) if a_info is not None else {}, acknowledged=job_dict['acknowledged'], func_name=job_dict['type'], hostname=job_dict['hostname'], worker_name=worker_name, worker_pid=job_dict['worker_pid'], routing_key=job_dict['delivery_info']['routing_key'] ) def to_dict(self): """ Return a dictionary of the job stats. Returns: dict: Dictionary of the stats. """ return { 'name': self.name, 'id': self.id, 'type': self.type, 'workflow_id': self.workflow_id, 'queue': self.queue, 'start_time': self.start_time, 'arguments': self.arguments, 'acknowledged': self.acknowledged, 'func_name': self.func_name, 'hostname': self.hostname, 'worker_name': self.worker_name, 'worker_pid': self.worker_pid, 'routing_key': self.routing_key }
class JobStats: ''' Represents the job (=celery task) information returned from celery. Args: name (str): The name of the job. job_id (str): The internal ID of the job. job_type (str): The type of the job (workflow, dag, task). queue (str): The name of the queue the job was scheduled to. workflow_id (str): The id of the workflow that started this job. start_time (datetime): The time the job was started in UTC. arguments (dict): The provided arguments to a workflow. acknowledged (bool): True of the job was acknowledged by the message system. func_name (str): The name of the function that represents this job. hostname (str): The name of the host this job runs on. worker_name (str): The name of the worker this job runs on. worker_pid (int): The pid of the process this jobs runs on. routing_key (str): The routing key for this job. ''' def __init__(self, name, job_id, job_type, queue, workflow_id, start_time, arguments, acknowledged, func_name, hostname, worker_name, worker_pid, routing_key): pass @classmethod def from_celery(cls, worker_name, job_dict, celery_app): ''' Create a JobStats object from the dictionary returned by celery. Args: worker_name (str): The name of the worker this jobs runs on. job_dict (dict): The dictionary as returned by celery. celery_app: Reference to a celery application object. Returns: JobStats: A fully initialized JobStats object. ''' pass def to_dict(self): ''' Return a dictionary of the job stats. Returns: dict: Dictionary of the stats. ''' pass
5
3
23
2
17
4
4
0.53
0
2
1
0
2
13
3
3
89
8
53
21
47
28
23
19
19
9
0
1
11
7,913
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.JobEvent
class JobEvent:
    """ The base class for job events from celery.

    Args:
        uuid (str): The internal event id.
        job_type (str): The type of job that caused this event (workflow, dag, task).
        event_type (str): The internal event type name.
        queue (str): The name of the queue the job was scheduled to.
        hostname (str): The name of the host on which the job is running.
        pid (int): The pid of the process that runs the job.
        name (str): The name of the workflow, dag or task that caused this event.
        workflow_id (str): The id of the workflow that hosts this job.
        event_time (datetime): The time when the event was triggered.
        duration (float, None): The duration it took to execute the job.
    """
    def __init__(self, uuid, job_type, event_type, queue, hostname, pid,
                 name, workflow_id, event_time, duration):
        self.uuid = uuid
        self.type = job_type
        self.event = event_type
        self.queue = queue
        self.hostname = hostname
        self.pid = pid
        self.name = name
        self.workflow_id = workflow_id
        self.event_time = event_time
        self.duration = duration

    @classmethod
    def from_event(cls, event):
        """ Create a JobEvent object from the event dictionary returned by celery.

        Args:
            event (dict): The dictionary as returned by celery.

        Returns:
            JobEvent: A fully initialized JobEvent object.
        """
        return cls(
            uuid=event['uuid'],
            job_type=event['job_type'],
            event_type=event['type'],
            queue=event['queue'],
            hostname=event['hostname'],
            pid=event['pid'],
            name=event['name'],
            workflow_id=event['workflow_id'],
            event_time=event['time'],
            duration=event['duration']
        )
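A sketch of from_event driven by a hand-written event dictionary; all field values are invented, but the keys match the ones the classmethod reads. It assumes the JobEvent class above is in scope.

from datetime import datetime

# Hand-written stand-in for the dictionary celery would deliver; values are invented.
event = {
    'uuid': 'aabbccdd-0000-1111-2222-333344445555',
    'job_type': 'task',
    'type': 'task-lightflow-succeeded',
    'queue': 'task',
    'hostname': 'worker-1',
    'pid': 4242,
    'name': 'print_task',
    'workflow_id': 'deadbeefdeadbeefdeadbeef',
    'time': datetime.utcnow(),
    'duration': 1.5,
}

job_event = JobEvent.from_event(event)
assert job_event.event == 'task-lightflow-succeeded'
assert job_event.duration == 1.5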
class JobEvent: ''' The base class for job events from celery. Args: uuid (str): The internal event id. job_type (str): The type of job that caused this event (workflow, dag, task). event_type (str): The internal event type name. queue (str): The name of the queue the job was scheduled to. hostname (str): The name of the host on which the job is running. pid (int): The pid of the process that runs the job. name (str): The name of the workflow, dag or task that caused this event. workflow_id (str): The id of the workflow that hosts this job. event_time (datetime): The time when the event was triggered. duration (float, None): The duration it took to execute the job. ''' def __init__(self, uuid, job_type, event_type, queue, hostname, pid, name, workflow_id, event_time, duration): pass @classmethod def from_event(cls, event): ''' Create a JobEvent object from the event dictionary returned by celery. Args: event (dict): The dictionary as returned by celery. Returns: JobEvent: A fully initialized JobEvent object. ''' pass
4
2
17
1
13
3
1
0.7
0
0
0
4
1
10
2
2
50
4
27
15
22
19
14
13
11
1
0
0
2
7,914
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.JobAbortedEvent
class JobAbortedEvent(JobEvent):
    """ This event is triggered when a job was aborted. """
    def __init__(self, uuid, job_type, event_type, queue, hostname, pid,
                 name, workflow_id, event_time, duration):
        super().__init__(uuid, job_type, event_type, queue, hostname, pid,
                         name, workflow_id, event_time, duration)
class JobAbortedEvent(JobEvent): ''' This event is triggered when a job was aborted. ''' def __init__(self, uuid, job_type, event_type, queue, hostname, pid, name, workflow_id, event_time, duration): pass
2
1
4
0
4
0
1
0.2
1
1
0
0
1
0
1
3
6
0
5
3
2
1
3
2
1
1
1
0
1
7,915
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.BrokerStats
class BrokerStats:
    """ Represents the broker information returned from celery.

    Args:
        hostname (str): The broker hostname.
        port (int): The broker port.
        transport (str): The transport protocol of the broker.
        virtual_host (str): The virtual host, e.g. the database number in redis.
    """
    def __init__(self, hostname, port, transport, virtual_host):
        self.hostname = hostname
        self.port = port
        self.transport = transport
        self.virtual_host = virtual_host

    @classmethod
    def from_celery(cls, broker_dict):
        """ Create a BrokerStats object from the dictionary returned by celery.

        Args:
            broker_dict (dict): The dictionary as returned by celery.

        Returns:
            BrokerStats: A fully initialized BrokerStats object.
        """
        return BrokerStats(
            hostname=broker_dict['hostname'],
            port=broker_dict['port'],
            transport=broker_dict['transport'],
            virtual_host=broker_dict['virtual_host']
        )

    def to_dict(self):
        """ Return a dictionary of the broker stats.

        Returns:
            dict: Dictionary of the stats.
        """
        return {
            'hostname': self.hostname,
            'port': self.port,
            'transport': self.transport,
            'virtual_host': self.virtual_host
        }
class BrokerStats: ''' Represents the broker information returned from celery. Args: hostname (str): The broker hostname. port (int): The broker port. transport (str): The transport protocol of the broker. virtual_host (str): The virtual host, e.g. the database number in redis. ''' def __init__(self, hostname, port, transport, virtual_host): pass @classmethod def from_celery(cls, broker_dict): ''' Create a BrokerStats object from the dictionary returned by celery. Args: broker_dict (dict): The dictionary as returned by celery. Returns: BrokerStats: A fully initialized BrokerStats object. ''' pass def to_dict(self): ''' Return a dictionary of the broker stats. Returns: dict: Dictionary of the stats. ''' pass
5
3
11
1
6
3
1
0.81
0
0
0
0
2
4
3
3
44
6
21
9
16
17
10
8
6
1
0
0
3
7,916
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/const.py
lightflow.queue.const.JobType
class JobType:
    Workflow = 'workflow'
    Dag = 'dag'
    Task = 'task'
class JobType: pass
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0
4
4
3
0
4
4
3
0
0
0
0
7,917
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/const.py
lightflow.queue.const.JobStatus
class JobStatus:
    Active = 0
    Registered = 1
    Reserved = 2
    Scheduled = 3
class JobStatus: pass
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0
5
5
4
0
5
5
4
0
0
0
0
7,918
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/const.py
lightflow.queue.const.JobExecPath
class JobExecPath:
    Workflow = 'lightflow.queue.jobs.execute_workflow'
    Dag = 'lightflow.queue.jobs.execute_dag'
    Task = 'lightflow.queue.jobs.execute_task'
class JobExecPath: pass
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0
4
4
3
0
4
4
3
0
0
0
0
7,919
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/const.py
lightflow.queue.const.JobEventName
class JobEventName:
    Started = 'task-lightflow-started'
    Succeeded = 'task-lightflow-succeeded'
    Stopped = 'task-lightflow-stopped'
    Aborted = 'task-lightflow-aborted'
class JobEventName: pass
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0
5
5
4
0
5
5
4
0
0
0
0
7,920
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.JobStartedEvent
class JobStartedEvent(JobEvent):
    """ This event is triggered when a new job starts running. """
    def __init__(self, uuid, job_type, event_type, queue, hostname, pid,
                 name, workflow_id, event_time, duration):
        super().__init__(uuid, job_type, event_type, queue, hostname, pid,
                         name, workflow_id, event_time, duration)
class JobStartedEvent(JobEvent): ''' This event is triggered when a new job starts running. ''' def __init__(self, uuid, job_type, event_type, queue, hostname, pid, name, workflow_id, event_time, duration): pass
2
1
4
0
4
0
1
0.2
1
1
0
0
1
0
1
3
6
0
5
3
2
1
3
2
1
1
1
0
1
7,921
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/mongo_proxy.py
lightflow.models.mongo_proxy.GridFSProxy
class GridFSProxy(MongoReconnectProxy):
    """ Proxy for catching AutoReconnect exceptions in function calls of the GridFS class

    Specialization of the MongoReconnectProxy class for the GridFS class.
    """
    def __init__(self, obj):
        super().__init__(obj, get_methods(gridfs, GridFS))
class GridFSProxy(MongoReconnectProxy): ''' Proxy for catching AutoReconnect exceptions in function calls of the GridFS class Specialization of the MongoReconnectProxy class for the GridFS class. ''' def __init__(self, obj): pass
2
1
3
0
3
0
1
0.75
1
1
0
0
1
0
1
9
8
1
4
2
2
3
3
2
1
1
1
0
1
7,922
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DataInvalidAlias
class DataInvalidAlias(RuntimeError):
    pass
class DataInvalidAlias(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,923
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.WorkflowDefinitionError
class WorkflowDefinitionError(RuntimeError):
    def __init__(self, workflow_name, graph_name):
        """ Initialize the exception for invalid workflow definitions.

        Args:
            workflow_name (str): The name of the workflow that contains an
                                 invalid definition.
            graph_name (str): The name of the dag that is invalid.
        """
        self.workflow_name = workflow_name
        self.graph_name = graph_name
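A sketch of raising and handling the exception above; the workflow and dag names are illustrative.

# Illustrative names only; assumes the WorkflowDefinitionError class above is in scope.
try:
    raise WorkflowDefinitionError(workflow_name='example_workflow',
                                  graph_name='main_dag')
except WorkflowDefinitionError as err:
    print('Invalid dag {} in workflow {}'.format(err.graph_name, err.workflow_name))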
class WorkflowDefinitionError(RuntimeError): def __init__(self, workflow_name, graph_name): ''' Initialize the exception for invalid workflow definitions. Args: workflow_name (str): The name of the workflow that contains an invalid definition. graph_name (str): The name of the dag that is invalid. ''' pass
2
1
10
1
3
6
1
1.5
1
0
0
0
1
2
1
12
11
1
4
4
2
6
4
4
2
1
4
0
1
7,924
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/dag_signal.py
lightflow.models.dag_signal.DagSignal
class DagSignal:
    """ Class to wrap the construction and sending of signals into easy to use methods """
    def __init__(self, client, dag_name):
        """ Initialise the dag signal convenience class.

        Args:
            client (Client): A reference to a signal client object.
            dag_name (str): The name of the dag sending this signal.
        """
        self._client = client
        self._dag_name = dag_name

    def stop_workflow(self):
        """ Send a stop signal to the workflow.

        Upon receiving the stop signal, the workflow will not queue any new dags.
        Furthermore it will make the stop signal available to the dags, which will
        then stop queueing new tasks. As soon as all active tasks have finished
        processing, the workflow will terminate.

        Returns:
            bool: True if the signal was sent successfully.
        """
        return self._client.send(Request(action='stop_workflow')).success

    @property
    def is_stopped(self):
        """ Check whether the dag received a stop signal from the workflow.

        As soon as the dag receives a stop signal, no new tasks will be queued
        and the dag will wait for the active tasks to terminate.

        Returns:
            bool: True if the dag should be stopped.
        """
        resp = self._client.send(
            Request(
                action='is_dag_stopped',
                payload={'dag_name': self._dag_name}
            )
        )
        return resp.payload['is_stopped']
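A sketch using hand-rolled stubs in place of the real signal client, to show the request/response shape the class relies on: send() must return an object with a success flag and a payload dict. The stub classes and their responses are invented for illustration, and the import assumes lightflow is installed.

# Sketch only: StubResponse and StubClient are invented stand-ins.
from lightflow.models.dag_signal import DagSignal


class StubResponse:
    def __init__(self, success=True, payload=None):
        self.success = success
        self.payload = payload if payload is not None else {}


class StubClient:
    def send(self, request):
        # Always report success and a not-stopped dag, regardless of the request.
        return StubResponse(success=True, payload={'is_stopped': False})


signal = DagSignal(StubClient(), dag_name='main_dag')
assert signal.stop_workflow() is True
assert signal.is_stopped is False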
class DagSignal: ''' Class to wrap the construction and sending of signals into easy to use methods ''' def __init__(self, client, dag_name): ''' Initialise the dag signal convenience class. Args: client (Client): A reference to a signal client object. dag_name (str): The name of the dag sending this signal. ''' pass def stop_workflow(self): ''' Send a stop signal to the workflow. Upon receiving the stop signal, the workflow will not queue any new dags. Furthermore it will make the stop signal available to the dags, which will then stop queueing new tasks. As soon as all active tasks have finished processing, the workflow will terminate. Returns: bool: True if the signal was sent successfully. ''' pass @property def is_stopped(self): ''' Check whether the dag received a stop signal from the workflow. As soon as the dag receives a stop signal, no new tasks will be queued and the dag will wait for the active tasks to terminate. Returns: bool: True if the dag should be stopped. ''' pass
5
4
12
2
4
6
1
1.33
0
1
1
0
3
2
3
3
42
7
15
8
10
20
9
7
5
1
0
0
3
7,925
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.WorkflowImportError
class WorkflowImportError(RuntimeError):
    pass
class WorkflowImportError(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,926
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/datastore.py
lightflow.models.datastore.DataStore
class DataStore: """ The persistent data storage for data shared during the life of a workflow. The DataStore is a persistent storage for all data that should be stored over the lifetime of a workflow and be made available to all tasks in the workflow. This storage is particularly useful for trigger based workflows that perform averaging or summing work. The DataStore is implemented using a MongoDB backend. For each workflow run a document is created and its id is used for identifying the workflow run. A proxy for the MongoClient is used to catch the AutoReconnect exception and handle it gracefully. Please note: Args: host (str): The host on which the MongoDB server runs. port (int): The port on which the MongoDB server listens. database (str): The name of the MongoDB collection. username (str): The username for the user logging in to MongoDB. password (str): The password for the user logging in to MongoDB. auth_source (str): The name of the database the user information is stored in. auth_mechanism (str): The authentication mechanism. connect_timeout (int): The timeout in ms after which a connection attempt is ended. auto_connect (bool): Set to True to connect to the MongoDB database immediately. handle_reconnect (bool): Set to True to automatically reconnect to MongoDB should the connection be lost. """ def __init__(self, host, port, database, *, username=None, password=None, auth_source='admin', auth_mechanism=None, connect_timeout=30000, auto_connect=False, handle_reconnect=True): self.host = host self.port = port self.database = database self._username = username self._password = password self._auth_source = auth_source self._auth_mechanism = auth_mechanism self._connect_timeout = connect_timeout self._handle_reconnect = handle_reconnect self._client = None if auto_connect: self.connect() def __enter__(self): """ Connects to MongoDB automatically when used as context manager """ if not self.is_connected: self.connect() return self def __exit__(self, *args): """ Disconnects from MongoDB automatically when used as context manager """ self.disconnect() @property def is_connected(self): """ Returns the connection status of the data store. Returns: bool: ``True`` if the data store is connected to the MongoDB server. """ if self._client is not None: try: self._client.server_info() except ConnectionFailure: return False return True else: return False def connect(self): """ Establishes a connection to the MongoDB server. Use the MongoProxy library in order to automatically handle AutoReconnect exceptions in a graceful and reliable way. """ mongodb_args = { 'host': self.host, 'port': self.port, 'username': self._username, 'password': self._password, 'authSource': self._auth_source, 'serverSelectionTimeoutMS': self._connect_timeout } if self._auth_mechanism is not None: mongodb_args['authMechanism'] = self._auth_mechanism self._client = MongoClient(**mongodb_args) if self._handle_reconnect: self._client = MongoClientProxy(self._client) def disconnect(self): """ Disconnect from the MongoDB server. """ if self._client is not None: self._client.close() @property def server_info(self): """ Returns the information of the connected MongoDB server. Returns: bool: ``True`` if a document with the specified workflow id exists. """ try: return self._client.server_info() except ConnectionFailure: raise DataStoreNotConnected() def exists(self, workflow_id): """ Checks whether a document with the specified workflow id already exists. Args: workflow_id (str): The workflow id that should be checked. 
Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: bool: ``True`` if a document with the specified workflow id exists. """ try: db = self._client[self.database] col = db[WORKFLOW_DATA_COLLECTION_NAME] return col.find_one({"_id": ObjectId(workflow_id)}) is not None except ConnectionFailure: raise DataStoreNotConnected() def add(self, payload=None): """ Adds a new document to the data store and returns its id. Args: payload (dict): Dictionary of initial data that should be stored in the new document in the meta section. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: str: The id of the newly created document. """ try: db = self._client[self.database] col = db[WORKFLOW_DATA_COLLECTION_NAME] return str(col.insert_one({ DataStoreDocumentSection.Meta: payload if isinstance(payload, dict) else {}, DataStoreDocumentSection.Data: {} }).inserted_id) except ConnectionFailure: raise DataStoreNotConnected() def remove(self, workflow_id): """ Removes a document specified by its id from the data store. All associated GridFs documents are deleted as well. Args: workflow_id (str): The id of the document that represents a workflow run. Raises: DataStoreNotConnected: If the data store is not connected to the server. """ try: db = self._client[self.database] fs = GridFSProxy(GridFS(db.unproxied_object)) for grid_doc in fs.find({"workflow_id": workflow_id}, no_cursor_timeout=True): fs.delete(grid_doc._id) col = db[WORKFLOW_DATA_COLLECTION_NAME] return col.delete_one({"_id": ObjectId(workflow_id)}) except ConnectionFailure: raise DataStoreNotConnected() def get(self, workflow_id): """ Returns the document for the given workflow id. Args: workflow_id (str): The id of the document that represents a workflow run. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: DataStoreDocument: The document for the given workflow id. """ try: db = self._client[self.database] fs = GridFSProxy(GridFS(db.unproxied_object)) return DataStoreDocument(db[WORKFLOW_DATA_COLLECTION_NAME], fs, workflow_id) except ConnectionFailure: raise DataStoreNotConnected()
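A hedged sketch of the connection workflow described in the DataStore docstrings above. It assumes lightflow is installed and that a MongoDB server is reachable at localhost:27017; both are assumptions made purely for this sketch.

# Sketch only: requires a reachable MongoDB instance (assumed at localhost:27017).
from lightflow.models.datastore import DataStore

store = DataStore(host='localhost', port=27017, database='lightflow')

with store:  # __enter__ connects, __exit__ disconnects
    workflow_id = store.add(payload={'trigger': 'manual'})
    assert store.exists(workflow_id)

    doc = store.get(workflow_id)   # returns a DataStoreDocument for this run
    store.remove(workflow_id)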
class DataStore: ''' The persistent data storage for data shared during the life of a workflow. The DataStore is a persistent storage for all data that should be stored over the lifetime of a workflow and be made available to all tasks in the workflow. This storage is particularly useful for trigger based workflows that perform averaging or summing work. The DataStore is implemented using a MongoDB backend. For each workflow run a document is created and its id is used for identifying the workflow run. A proxy for the MongoClient is used to catch the AutoReconnect exception and handle it gracefully. Please note: Args: host (str): The host on which the MongoDB server runs. port (int): The port on which the MongoDB server listens. database (str): The name of the MongoDB collection. username (str): The username for the user logging in to MongoDB. password (str): The password for the user logging in to MongoDB. auth_source (str): The name of the database the user information is stored in. auth_mechanism (str): The authentication mechanism. connect_timeout (int): The timeout in ms after which a connection attempt is ended. auto_connect (bool): Set to True to connect to the MongoDB database immediately. handle_reconnect (bool): Set to True to automatically reconnect to MongoDB should the connection be lost. ''' def __init__(self, host, port, database, *, username=None, password=None, auth_source='admin', auth_mechanism=None, connect_timeout=30000, auto_connect=False, handle_reconnect=True): pass def __enter__(self): ''' Connects to MongoDB automatically when used as context manager ''' pass def __exit__(self, *args): ''' Disconnects from MongoDB automatically when used as context manager ''' pass @property def is_connected(self): ''' Returns the connection status of the data store. Returns: bool: ``True`` if the data store is connected to the MongoDB server. ''' pass def connect(self): ''' Establishes a connection to the MongoDB server. Use the MongoProxy library in order to automatically handle AutoReconnect exceptions in a graceful and reliable way. ''' pass def disconnect(self): ''' Disconnect from the MongoDB server. ''' pass @property def server_info(self): ''' Returns the information of the connected MongoDB server. Returns: bool: ``True`` if a document with the specified workflow id exists. ''' pass def exists(self, workflow_id): ''' Checks whether a document with the specified workflow id already exists. Args: workflow_id (str): The workflow id that should be checked. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: bool: ``True`` if a document with the specified workflow id exists. ''' pass def add(self, payload=None): ''' Adds a new document to the data store and returns its id. Args: payload (dict): Dictionary of initial data that should be stored in the new document in the meta section. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: str: The id of the newly created document. ''' pass def remove(self, workflow_id): ''' Removes a document specified by its id from the data store. All associated GridFs documents are deleted as well. Args: workflow_id (str): The id of the document that represents a workflow run. Raises: DataStoreNotConnected: If the data store is not connected to the server. ''' pass def get(self, workflow_id): ''' Returns the document for the given workflow id. Args: workflow_id (str): The id of the document that represents a workflow run. 
Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: DataStoreDocument: The document for the given workflow id. ''' pass
14
11
15
2
8
4
2
0.77
0
7
5
0
11
10
11
11
202
41
91
37
75
70
74
33
62
3
0
2
25
7,927
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/datastore.py
lightflow.models.datastore.DataStoreDocument
class DataStoreDocument: """ A single data store document containing the data for a workflow run. The document provides methods in order to retrieve and set data in the persistent data store. It represents the data for a single workflow run. Args: collection: A MongoDB collection object pointing to the data store collection. grid_fs: A GridFS object used for splitting large, binary data into smaller chunks in order to avoid the 16MB document limit of MongoDB. workflow_id: The id of the workflow run this document is associated with. """ def __init__(self, collection, grid_fs, workflow_id): self._collection = collection self._gridfs = grid_fs self._workflow_id = workflow_id def get(self, key, default=None, *, section=DataStoreDocumentSection.Data): """ Return the field specified by its key from the specified section. This method access the specified section of the workflow document and returns the value for the given key. Args: key (str): The key pointing to the value that should be retrieved. It supports MongoDB's dot notation for nested fields. default: The default value that is returned if the key does not exist. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: object: The value from the field that the specified key is pointing to. If the key does not exist, the default value is returned. If no default value is provided and the key does not exist ``None`` is returned. """ key_notation = '.'.join([section, key]) try: return self._decode_value(self._data_from_dotnotation(key_notation, default)) except KeyError: return None def set(self, key, value, *, section=DataStoreDocumentSection.Data): """ Store a value under the specified key in the given section of the document. This method stores a value into the specified section of the workflow data store document. Any existing value is overridden. Before storing a value, any linked GridFS document under the specified key is deleted. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. value: The value that should be stored/updated. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the value could be set/updated, otherwise ``False``. """ key_notation = '.'.join([section, key]) try: self._delete_gridfs_data(self._data_from_dotnotation(key_notation, default=None)) except KeyError: logger.info('Adding new field {} to the data store'.format(key_notation)) result = self._collection.update_one( {"_id": ObjectId(self._workflow_id)}, { "$set": { key_notation: self._encode_value(value) }, "$currentDate": {"lastModified": True} } ) return result.modified_count == 1 def push(self, key, value, *, section=DataStoreDocumentSection.Data): """ Appends a value to a list in the specified section of the document. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. value: The value that should be appended to a list in the data store. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the value could be appended, otherwise ``False``. 
""" key_notation = '.'.join([section, key]) result = self._collection.update_one( {"_id": ObjectId(self._workflow_id)}, { "$push": { key_notation: self._encode_value(value) }, "$currentDate": {"lastModified": True} } ) return result.modified_count == 1 def extend(self, key, values, *, section=DataStoreDocumentSection.Data): """ Extends a list in the data store with the elements of values. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. values (list): A list of the values that should be used to extend the list in the document. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the list in the database could be extended, otherwise ``False``. """ key_notation = '.'.join([section, key]) if not isinstance(values, list): return False result = self._collection.update_one( {"_id": ObjectId(self._workflow_id)}, { "$push": { key_notation: {"$each": self._encode_value(values)} }, "$currentDate": {"lastModified": True} } ) return result.modified_count == 1 def _data_from_dotnotation(self, key, default=None): """ Returns the MongoDB data from a key using dot notation. Args: key (str): The key to the field in the workflow document. Supports MongoDB's dot notation for embedded fields. default (object): The default value that is returned if the key does not exist. Returns: object: The data for the specified key or the default value. """ if key is None: raise KeyError('NoneType is not a valid key!') doc = self._collection.find_one({"_id": ObjectId(self._workflow_id)}) if doc is None: return default for k in key.split('.'): doc = doc[k] return doc def _encode_value(self, value): """ Encodes the value such that it can be stored into MongoDB. Any primitive types are stored directly into MongoDB, while non-primitive types are pickled and stored as GridFS objects. The id pointing to a GridFS object replaces the original value. Args: value (object): The object that should be encoded for storing in MongoDB. Returns: object: The encoded value ready to be stored in MongoDB. """ if isinstance(value, (int, float, str, bool, datetime)): return value elif isinstance(value, list): return [self._encode_value(item) for item in value] elif isinstance(value, dict): result = {} for key, item in value.items(): result[key] = self._encode_value(item) return result else: return self._gridfs.put(Binary(pickle.dumps(value)), workflow_id=self._workflow_id) def _decode_value(self, value): """ Decodes the value by turning any binary data back into Python objects. The method searches for ObjectId values, loads the associated binary data from GridFS and returns the decoded Python object. Args: value (object): The value that should be decoded. Raises: DataStoreDecodingError: An ObjectId was found but the id is not a valid GridFS id. DataStoreDecodeUnknownType: The type of the specified value is unknown. Returns: object: The decoded value as a valid Python object. 
""" if isinstance(value, (int, float, str, bool, datetime)): return value elif isinstance(value, list): return [self._decode_value(item) for item in value] elif isinstance(value, dict): result = {} for key, item in value.items(): result[key] = self._decode_value(item) return result elif isinstance(value, ObjectId): if self._gridfs.exists({"_id": value}): return pickle.loads(self._gridfs.get(value).read()) else: raise DataStoreGridfsIdInvalid() else: raise DataStoreDecodeUnknownType() def _delete_gridfs_data(self, data): """ Delete all GridFS data that is linked by fields in the specified data. Args: data: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object for any ObjectID is deleted. """ if isinstance(data, ObjectId): if self._gridfs.exists({"_id": data}): self._gridfs.delete(data) else: raise DataStoreGridfsIdInvalid() elif isinstance(data, list): for item in data: self._delete_gridfs_data(item) elif isinstance(data, dict): for key, item in data.items(): self._delete_gridfs_data(item)
class DataStoreDocument: ''' A single data store document containing the data for a workflow run. The document provides methods in order to retrieve and set data in the persistent data store. It represents the data for a single workflow run. Args: collection: A MongoDB collection object pointing to the data store collection. grid_fs: A GridFS object used for splitting large, binary data into smaller chunks in order to avoid the 16MB document limit of MongoDB. workflow_id: The id of the workflow run this document is associated with. ''' def __init__(self, collection, grid_fs, workflow_id): pass def get(self, key, default=None, *, section=DataStoreDocumentSection.Data): ''' Return the field specified by its key from the specified section. This method access the specified section of the workflow document and returns the value for the given key. Args: key (str): The key pointing to the value that should be retrieved. It supports MongoDB's dot notation for nested fields. default: The default value that is returned if the key does not exist. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: object: The value from the field that the specified key is pointing to. If the key does not exist, the default value is returned. If no default value is provided and the key does not exist ``None`` is returned. ''' pass def set(self, key, value, *, section=DataStoreDocumentSection.Data): ''' Store a value under the specified key in the given section of the document. This method stores a value into the specified section of the workflow data store document. Any existing value is overridden. Before storing a value, any linked GridFS document under the specified key is deleted. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. value: The value that should be stored/updated. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the value could be set/updated, otherwise ``False``. ''' pass def push(self, key, value, *, section=DataStoreDocumentSection.Data): ''' Appends a value to a list in the specified section of the document. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. value: The value that should be appended to a list in the data store. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the value could be appended, otherwise ``False``. ''' pass def extend(self, key, values, *, section=DataStoreDocumentSection.Data): ''' Extends a list in the data store with the elements of values. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. values (list): A list of the values that should be used to extend the list in the document. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the list in the database could be extended, otherwise ``False``. ''' pass def _data_from_dotnotation(self, key, default=None): ''' Returns the MongoDB data from a key using dot notation. Args: key (str): The key to the field in the workflow document. Supports MongoDB's dot notation for embedded fields. default (object): The default value that is returned if the key does not exist. Returns: object: The data for the specified key or the default value. 
''' pass def _encode_value(self, value): ''' Encodes the value such that it can be stored into MongoDB. Any primitive types are stored directly into MongoDB, while non-primitive types are pickled and stored as GridFS objects. The id pointing to a GridFS object replaces the original value. Args: value (object): The object that should be encoded for storing in MongoDB. Returns: object: The encoded value ready to be stored in MongoDB. ''' pass def _decode_value(self, value): ''' Decodes the value by turning any binary data back into Python objects. The method searches for ObjectId values, loads the associated binary data from GridFS and returns the decoded Python object. Args: value (object): The value that should be decoded. Raises: DataStoreDecodingError: An ObjectId was found but the id is not a valid GridFS id. DataStoreDecodeUnknownType: The type of the specified value is unknown. Returns: object: The decoded value as a valid Python object. ''' pass def _delete_gridfs_data(self, data): ''' Delete all GridFS data that is linked by fields in the specified data. Args: data: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object for any ObjectID is deleted. ''' pass
10
9
24
3
12
9
3
0.89
0
11
3
0
9
3
9
9
235
37
105
28
95
93
68
28
58
7
0
2
31
7,928
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/datastore.py
lightflow.models.datastore.DataStoreDocumentSection
class DataStoreDocumentSection:
    """ The different sections the data store document contains """
    Meta = 'meta'
    Data = 'data'
class DataStoreDocumentSection: ''' The different sections the data store document contains ''' pass
1
1
0
0
0
0
0
0.33
0
0
0
0
0
0
0
0
4
0
3
3
2
1
3
3
2
0
0
0
0
7,929
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.AbortWorkflow
class AbortWorkflow(LightflowException):
    pass
class AbortWorkflow(LightflowException): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
14
2
0
2
1
1
0
2
1
1
0
5
0
0
7,930
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/dag.py
lightflow.models.dag.Dag
class Dag: """ A dag hosts a graph built from tasks and manages the task execution process. One ore more tasks, that are connected with each other, form a graph of tasks. The connection between tasks has a direction, indicating the flow of data from task to task. Closed loops are not allowed. Therefore, the graph topology employed here is referred to as a dag (directed acyclic graph). The dag class not only provides tools to build a task graph, but also manages the processing of the tasks in the right order by traversing the graph using a breadth-first search strategy. Please note: this class has to be serializable (e.g. by pickle) Args: name (str): The name of the dag. autostart (bool): Set to True in order to start the processing of the tasks upon the start of the workflow. queue (str): Name of the queue the dag should be scheduled to. schema (dict): A dictionary with the definition of the task graph. """ def __init__(self, name, *, autostart=True, queue=DefaultJobQueueName.Dag, schema=None): self._name = name self._autostart = autostart self._queue = queue self._schema = schema self._copy_counter = 0 self._workflow_name = None @property def name(self): """ Return the name of the dag. """ return self._name @property def autostart(self): """ Return whether the dag is automatically run upon the start of the workflow.""" return self._autostart @property def queue(self): """ Return the name of the queue the dag should be scheduled to.""" return self._queue @property def workflow_name(self): """ Return the name of the workflow this dag belongs to. """ return self._workflow_name @workflow_name.setter def workflow_name(self, name): """ Set the name of the workflow this dag belongs to. Args: name (str): The name of the workflow. """ self._workflow_name = name def define(self, schema, *, validate=True): """ Store the task graph definition (schema). The schema has to adhere to the following rules: A key in the schema dict represents a parent task and the value one or more children: {parent: [child]} or {parent: [child1, child2]} The data output of one task can be routed to a labelled input slot of successor tasks using a dictionary instead of a list for the children: {parent: {child1: 'positive', child2: 'negative'}} An empty slot name or None skips the creation of a labelled slot: {parent: {child1: '', child2: None}} Args: schema (dict): A dictionary with the schema definition. validate (bool): Set to True to validate the graph by checking whether it is a directed acyclic graph. """ self._schema = schema if validate: self.validate(self.make_graph(self._schema)) def run(self, config, workflow_id, signal, *, data=None): """ Run the dag by calling the tasks in the correct order. Args: config (Config): Reference to the configuration object from which the settings for the dag are retrieved. workflow_id (str): The unique ID of the workflow that runs this dag. signal (DagSignal): The signal object for dags. It wraps the construction and sending of signals into easy to use methods. data (MultiTaskData): The initial data that is passed on to the start tasks. Raises: DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops). ConfigNotDefinedError: If the configuration for the dag is empty. 
""" graph = self.make_graph(self._schema) # pre-checks self.validate(graph) if config is None: raise ConfigNotDefinedError() # create the celery app for submitting tasks celery_app = create_app(config) # the task queue for managing the current state of the tasks tasks = [] stopped = False # add all tasks without predecessors to the task list for task in nx.topological_sort(graph): task.workflow_name = self.workflow_name task.dag_name = self.name if len(list(graph.predecessors(task))) == 0: task.state = TaskState.Waiting tasks.append(task) def set_task_completed(completed_task): """ For each completed task, add all successor tasks to the task list. If they are not in the task list yet, flag them as 'waiting'. """ completed_task.state = TaskState.Completed for successor in graph.successors(completed_task): if successor not in tasks: successor.state = TaskState.Waiting tasks.append(successor) # process the task queue as long as there are tasks in it while tasks: if not stopped: stopped = signal.is_stopped # delay the execution by the polling time if config.dag_polling_time > 0.0: sleep(config.dag_polling_time) for i in range(len(tasks) - 1, -1, -1): task = tasks[i] # for each waiting task, wait for all predecessor tasks to be # completed. Then check whether the task should be skipped by # interrogating the predecessor tasks. if task.is_waiting: if stopped: task.state = TaskState.Stopped else: pre_tasks = list(graph.predecessors(task)) if all([p.is_completed for p in pre_tasks]): # check whether the task should be skipped run_task = task.has_to_run or len(pre_tasks) == 0 for pre in pre_tasks: if run_task: break # predecessor task is skipped and flag should # not be propagated if pre.is_skipped and not pre.propagate_skip: run_task = True # limits of a non-skipped predecessor task if not pre.is_skipped: if pre.celery_result.result.limit is not None: if task.name in [ n.name if isinstance(n, BaseTask) else n for n in pre.celery_result.result.limit]: run_task = True else: run_task = True task.is_skipped = not run_task # send the task to celery or, if skipped, mark it as completed if task.is_skipped: set_task_completed(task) else: # compose the input data from the predecessor tasks # output. Data from skipped predecessor tasks do not # contribute to the input data if len(pre_tasks) == 0: input_data = data else: input_data = MultiTaskData() for pt in [p for p in pre_tasks if not p.is_skipped]: slot = graph[pt][task]['slot'] input_data.add_dataset( pt.name, pt.celery_result.result.data.default_dataset, aliases=[slot] if slot is not None else None) task.state = TaskState.Running task.celery_result = celery_app.send_task( JobExecPath.Task, args=(task, workflow_id, input_data), queue=task.queue, routing_key=task.queue ) # flag task as completed elif task.is_running: if task.celery_completed: set_task_completed(task) elif task.celery_failed: task.state = TaskState.Aborted signal.stop_workflow() # cleanup task results that are not required anymore elif task.is_completed: if all([s.is_completed or s.is_stopped or s.is_aborted for s in graph.successors(task)]): if celery_app.conf.result_expires == 0: task.clear_celery_result() tasks.remove(task) # cleanup and remove stopped and aborted tasks elif task.is_stopped or task.is_aborted: if celery_app.conf.result_expires == 0: task.clear_celery_result() tasks.remove(task) def validate(self, graph): """ Validate the graph by checking whether it is a directed acyclic graph. Args: graph (DiGraph): Reference to a DiGraph object from NetworkX. 
Raises: DirectedAcyclicGraphInvalid: If the graph is not a valid dag. """ if not nx.is_directed_acyclic_graph(graph): raise DirectedAcyclicGraphInvalid(graph_name=self._name) @staticmethod def make_graph(schema): """ Construct the task graph (dag) from a given schema. Parses the graph schema definition and creates the task graph. Tasks are the vertices of the graph and the connections defined in the schema become the edges. A key in the schema dict represents a parent task and the value one or more children: {parent: [child]} or {parent: [child1, child2]} The data output of one task can be routed to a labelled input slot of successor tasks using a dictionary instead of a list for the children: {parent: {child1: 'positive', child2: 'negative'}} An empty slot name or None skips the creation of a labelled slot: {parent: {child1: '', child2: None}} The underlying graph library creates nodes automatically, when an edge between non-existing nodes is created. Args: schema (dict): A dictionary with the schema definition. Returns: DiGraph: A reference to the fully constructed graph object. Raises: DirectedAcyclicGraphUndefined: If the schema is not defined. """ if schema is None: raise DirectedAcyclicGraphUndefined() # sanitize the input schema such that it follows the structure: # {parent: {child_1: slot_1, child_2: slot_2, ...}, ...} sanitized_schema = {} for parent, children in schema.items(): child_dict = {} if children is not None: if isinstance(children, list): if len(children) > 0: child_dict = {child: None for child in children} else: child_dict = {None: None} elif isinstance(children, dict): for child, slot in children.items(): child_dict[child] = slot if slot != '' else None else: child_dict = {children: None} else: child_dict = {None: None} sanitized_schema[parent] = child_dict # build the graph from the sanitized schema graph = nx.DiGraph() for parent, children in sanitized_schema.items(): for child, slot in children.items(): if child is not None: graph.add_edge(parent, child, slot=slot) else: graph.add_node(parent) return graph def __deepcopy__(self, memo): """ Create a copy of the dag object. This method keeps track of the number of copies that have been made. The number is appended to the name of the copy. Args: memo (dict): a dictionary that keeps track of the objects that have already been copied. Returns: Dag: a copy of the dag object """ self._copy_counter += 1 new_dag = Dag('{}:{}'.format(self._name, self._copy_counter), autostart=self._autostart, queue=self._queue) new_dag._schema = deepcopy(self._schema, memo) return new_dag
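A sketch of the schema shapes spelled out in define() and make_graph() above. Plain strings stand in for task objects here, which make_graph accepts since graph nodes are created from whatever objects appear in the schema; the import assumes lightflow (and therefore networkx) is installed, and the task names are invented.

# Sketch only: strings stand in for task objects, names are illustrative.
from lightflow.models.dag import Dag

# list form: one parent, two children, no labelled slots
graph = Dag.make_graph({'read': ['process_a', 'process_b']})
assert set(graph.successors('read')) == {'process_a', 'process_b'}

# dict form: route the parent's output into labelled input slots
graph = Dag.make_graph({'classify': {'store_pos': 'positive',
                                     'store_neg': 'negative'}})
assert graph['classify']['store_pos']['slot'] == 'positive'

# a single node with no children
graph = Dag.make_graph({'standalone': None})
assert 'standalone' in graph.nodes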
class Dag: ''' A dag hosts a graph built from tasks and manages the task execution process. One ore more tasks, that are connected with each other, form a graph of tasks. The connection between tasks has a direction, indicating the flow of data from task to task. Closed loops are not allowed. Therefore, the graph topology employed here is referred to as a dag (directed acyclic graph). The dag class not only provides tools to build a task graph, but also manages the processing of the tasks in the right order by traversing the graph using a breadth-first search strategy. Please note: this class has to be serializable (e.g. by pickle) Args: name (str): The name of the dag. autostart (bool): Set to True in order to start the processing of the tasks upon the start of the workflow. queue (str): Name of the queue the dag should be scheduled to. schema (dict): A dictionary with the definition of the task graph. ''' def __init__(self, name, *, autostart=True, queue=DefaultJobQueueName.Dag, schema=None): pass @property def name(self): ''' Return the name of the dag. ''' pass @property def autostart(self): ''' Return whether the dag is automatically run upon the start of the workflow.''' pass @property def queue(self): ''' Return the name of the queue the dag should be scheduled to.''' pass @property def workflow_name(self): ''' Return the name of the workflow this dag belongs to. ''' pass @workflow_name.setter def workflow_name(self): ''' Set the name of the workflow this dag belongs to. Args: name (str): The name of the workflow. ''' pass def define(self, schema, *, validate=True): ''' Store the task graph definition (schema). The schema has to adhere to the following rules: A key in the schema dict represents a parent task and the value one or more children: {parent: [child]} or {parent: [child1, child2]} The data output of one task can be routed to a labelled input slot of successor tasks using a dictionary instead of a list for the children: {parent: {child1: 'positive', child2: 'negative'}} An empty slot name or None skips the creation of a labelled slot: {parent: {child1: '', child2: None}} Args: schema (dict): A dictionary with the schema definition. validate (bool): Set to True to validate the graph by checking whether it is a directed acyclic graph. ''' pass def run(self, config, workflow_id, signal, *, data=None): ''' Run the dag by calling the tasks in the correct order. Args: config (Config): Reference to the configuration object from which the settings for the dag are retrieved. workflow_id (str): The unique ID of the workflow that runs this dag. signal (DagSignal): The signal object for dags. It wraps the construction and sending of signals into easy to use methods. data (MultiTaskData): The initial data that is passed on to the start tasks. Raises: DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops). ConfigNotDefinedError: If the configuration for the dag is empty. ''' pass def set_task_completed(completed_task): ''' For each completed task, add all successor tasks to the task list. If they are not in the task list yet, flag them as 'waiting'. ''' pass def validate(self, graph): ''' Validate the graph by checking whether it is a directed acyclic graph. Args: graph (DiGraph): Reference to a DiGraph object from NetworkX. Raises: DirectedAcyclicGraphInvalid: If the graph is not a valid dag. ''' pass @staticmethod def make_graph(schema): ''' Construct the task graph (dag) from a given schema. Parses the graph schema definition and creates the task graph. 
Tasks are the vertices of the graph and the connections defined in the schema become the edges. A key in the schema dict represents a parent task and the value one or more children: {parent: [child]} or {parent: [child1, child2]} The data output of one task can be routed to a labelled input slot of successor tasks using a dictionary instead of a list for the children: {parent: {child1: 'positive', child2: 'negative'}} An empty slot name or None skips the creation of a labelled slot: {parent: {child1: '', child2: None}} The underlying graph library creates nodes automatically, when an edge between non-existing nodes is created. Args: schema (dict): A dictionary with the schema definition. Returns: DiGraph: A reference to the fully constructed graph object. Raises: DirectedAcyclicGraphUndefined: If the schema is not defined. ''' pass def __deepcopy__(self, memo): ''' Create a copy of the dag object. This method keeps track of the number of copies that have been made. The number is appended to the name of the copy. Args: memo (dict): a dictionary that keeps track of the objects that have already been copied. Returns: Dag: a copy of the dag object ''' pass
19
12
24
4
12
8
5
0.75
0
11
8
0
10
6
11
11
321
59
150
45
130
112
118
38
105
30
0
9
56
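A minimal sketch (not taken from the record above) of the schema-to-graph rules documented in Dag.make_graph: list children become unlabelled edges, dict children label the input slot of the successor, and an empty string or None skips the label. The task names 'read', 'positive' and 'negative' are hypothetical placeholders.

# Sketch of the schema sanitisation and graph construction described in Dag.make_graph.
import networkx as nx


def make_graph(schema):
    graph = nx.DiGraph()
    for parent, children in schema.items():
        # normalise the children into a {child: slot} mapping
        if children is None:
            child_dict = {None: None}
        elif isinstance(children, list):
            child_dict = {c: None for c in children} if children else {None: None}
        elif isinstance(children, dict):
            child_dict = {c: (s if s != '' else None) for c, s in children.items()}
        else:
            child_dict = {children: None}

        for child, slot in child_dict.items():
            if child is not None:
                graph.add_edge(parent, child, slot=slot)
            else:
                graph.add_node(parent)
    return graph


# children given as a dict label the input slots of the successor tasks
graph = make_graph({'read': {'positive': 'good', 'negative': None}})
print(list(graph.edges(data=True)))
# [('read', 'positive', {'slot': 'good'}), ('read', 'negative', {'slot': None})]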
7,931
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.ConfigFieldError
class ConfigFieldError(RuntimeError): pass
class ConfigFieldError(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,932
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.ConfigNotDefinedError
class ConfigNotDefinedError(RuntimeError): pass
class ConfigNotDefinedError(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,933
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.ConfigOverwriteError
class ConfigOverwriteError(RuntimeError): pass
class ConfigOverwriteError(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,934
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DagNameUnknown
class DagNameUnknown(RuntimeError): pass
class DagNameUnknown(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,935
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DataStoreIDExists
class DataStoreIDExists(RuntimeError): pass
class DataStoreIDExists(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,936
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DataStoreGridfsIdInvalid
class DataStoreGridfsIdInvalid(RuntimeError): pass
class DataStoreGridfsIdInvalid(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,937
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DataStoreDecodeUnknownType
class DataStoreDecodeUnknownType(RuntimeError): pass
class DataStoreDecodeUnknownType(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,938
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.ConfigLoadError
class ConfigLoadError(RuntimeError): """ Raise this if there is a configuration loading error. """ pass
class ConfigLoadError(RuntimeError): ''' Raise this if there is a configuration loading error. ''' pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
1
1
1
2
1
1
0
4
0
0
7,939
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/action.py
lightflow.models.action.Action
class Action: """ The class for the action object that is returned by each task. The action object encapsulates the information that is returned by a task to the system. It contains the data that should be passed on to the successor tasks and a list of immediate successor tasks that should be executed. The latter allows to limit the execution of successor tasks. """ def __init__(self, data, limit=None): """ Initialise the Action object. Args: data (MultiTaskData): The processed data from the task that should be passed on to successor tasks. limit (list): A list of names of all immediate successor tasks that should be executed. """ self._data = data self._limit = limit @property def data(self): """ Returns the data object. """ return self._data @property def limit(self): """ Returns the list of tasks that should be executed. """ return self._limit def copy(self): """ Return a copy of the Action object. """ return copy(self)
class Action: ''' The class for the action object that is returned by each task. The action object encapsulates the information that is returned by a task to the system. It contains the data that should be passed on to the successor tasks and a list of immediate successor tasks that should be executed. The latter allows to limit the execution of successor tasks. ''' def __init__(self, data, limit=None): ''' Initialise the Action object. Args: data (MultiTaskData): The processed data from the task that should be passed on to successor tasks. limit (list): A list of names of all immediate successor tasks that should be executed. ''' pass @property def data(self): ''' Returns the data object. ''' pass @property def limit(self): ''' Returns the list of tasks that should be executed. ''' pass def copy(self): ''' Return a copy of the Action object. ''' pass
7
5
5
0
2
3
1
1.33
0
0
0
0
4
2
4
4
33
5
12
9
5
16
10
7
5
1
0
0
4
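A hedged usage sketch for the Action record above, assuming the lightflow package from this repository is installed; the successor name 'branch_task' and the plain dict standing in for a MultiTaskData object are illustrative only.

from lightflow.models.action import Action

def run_body(data):
    # pass the (possibly modified) data on, but only to one immediate successor
    data['threshold_passed'] = data.get('value', 0) > 10
    return Action(data, limit=['branch_task'])

action = run_body({'value': 42})
print(action.data, action.limit)  # {'value': 42, 'threshold_passed': True} ['branch_task']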
7,940
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/queue/models.py
lightflow.queue.models.WorkerStats
class WorkerStats: """ Represents the worker information returned from celery. Args: name (str): The name of the worker. broker (BrokerStats): A reference to a BrokerStats Object the worker is using. pid (int): The PID of the worker. process_pids (int): The PIDs of the concurrent task processes. concurrency (int): The number of concurrent processes. job_count (int): The number of jobs this worker has processed so far. queues (list): A list of QueueStats objects that represent the queues this worker is listening on. """ def __init__(self, name, broker, pid, process_pids, concurrency, job_count, queues): self.name = name self.broker = broker self.pid = pid self.process_pids = process_pids self.concurrency = concurrency self.job_count = job_count self.queues = queues @classmethod def from_celery(cls, name, worker_dict, queues): """ Create a WorkerStats object from the dictionary returned by celery. Args: name (str): The name of the worker. worker_dict (dict): The dictionary as returned by celery. queues (list): A list of QueueStats objects that represent the queues this worker is listening on. Returns: WorkerStats: A fully initialized WorkerStats object. """ return WorkerStats( name=name, broker=BrokerStats.from_celery(worker_dict['broker']), pid=worker_dict['pid'], process_pids=worker_dict['pool']['processes'], concurrency=worker_dict['pool']['max-concurrency'], job_count=worker_dict['pool']['writes']['total'], queues=queues ) def to_dict(self): """ Return a dictionary of the worker stats. Returns: dict: Dictionary of the stats. """ return { 'name': self.name, 'broker': self.broker.to_dict(), 'pid': self.pid, 'process_pids': self.process_pids, 'concurrency': self.concurrency, 'job_count': self.job_count, 'queues': [q.to_dict() for q in self.queues] }
class WorkerStats: ''' Represents the worker information returned from celery. Args: name (str): The name of the worker. broker (BrokerStats): A reference to a BrokerStats Object the worker is using. pid (int): The PID of the worker. process_pids (int): The PIDs of the concurrent task processes. concurrency (int): The number of concurrent processes. job_count (int): The number of jobs this worker has processed so far. queues (list): A list of QueueStats objects that represent the queues this worker is listening on. ''' def __init__(self, name, broker, pid, process_pids, concurrency, job_count, queues): pass @classmethod def from_celery(cls, name, worker_dict, queues): ''' Create a WorkerStats object from the dictionary returned by celery. Args: name (str): The name of the worker. worker_dict (dict): The dictionary as returned by celery. queues (list): A list of QueueStats objects that represent the queues this worker is listening on. Returns: WorkerStats: A fully initialized WorkerStats object. ''' pass def to_dict(self): ''' Return a dictionary of the worker stats. Returns: dict: Dictionary of the stats. ''' pass
5
3
15
1
10
4
1
0.77
0
1
1
0
2
7
3
3
61
6
31
13
25
24
13
11
9
1
0
0
3
7,941
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DataStoreIDInvalid
class DataStoreIDInvalid(RuntimeError): pass
class DataStoreIDInvalid(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,942
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.WorkflowArgumentError
class WorkflowArgumentError(RuntimeError): pass
class WorkflowArgumentError(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,943
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.WorkerEventTypeUnsupported
class WorkerEventTypeUnsupported(RuntimeError): pass
class WorkerEventTypeUnsupported(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,944
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.TaskReturnActionInvalid
class TaskReturnActionInvalid(RuntimeError): pass
class TaskReturnActionInvalid(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,945
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.StopTask
class StopTask(LightflowException): def __init__(self, message='', *, skip_successors=True): super().__init__(message) self.skip_successors = skip_successors
class StopTask(LightflowException): def __init__(self, message='', *, skip_successors=True): pass
2
0
3
0
3
0
1
0
1
1
0
0
1
1
1
15
4
0
4
3
2
0
4
3
2
1
5
0
1
7,946
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/config.py
lightflow.config.Config
class Config: """ Hosts the global configuration. The configuration is read from a structured YAML file or a dictionary. The location of the file can either be specified directly, is given in the environment variable LIGHTFLOW_CONFIG_ENV, is looked for in the current execution directory or in the home directory of the user. """ def __init__(self): self._config = None @classmethod def from_file(cls, filename, *, strict=True): """ Create a new Config object from a configuration file. Args: filename (str): The location and name of the configuration file. strict (bool): If true raises a ConfigLoadError when the configuration cannot be found. Returns: An instance of the Config class. Raises: ConfigLoadError: If the configuration cannot be found. """ config = cls() config.load_from_file(filename, strict=strict) return config def load_from_file(self, filename=None, *, strict=True): """ Load the configuration from a file. The location of the configuration file can either be specified directly in the parameter filename or is searched for in the following order: 1. In the environment variable given by LIGHTFLOW_CONFIG_ENV 2. In the current execution directory 3. In the user's home directory Args: filename (str): The location and name of the configuration file. strict (bool): If true raises a ConfigLoadError when the configuration cannot be found. Raises: ConfigLoadError: If the configuration cannot be found. """ self.set_to_default() if filename: self._update_from_file(filename) else: if LIGHTFLOW_CONFIG_ENV not in os.environ: if os.path.isfile(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)): self._update_from_file( os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)) elif os.path.isfile(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))): self._update_from_file( expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))) else: if strict: raise ConfigLoadError('Could not find the configuration file.') else: self._update_from_file(expand_env_var(os.environ[LIGHTFLOW_CONFIG_ENV])) self._update_python_paths() def load_from_dict(self, conf_dict=None): """ Load the configuration from a dictionary. Args: conf_dict (dict): Dictionary with the configuration. """ self.set_to_default() self._update_dict(self._config, conf_dict) self._update_python_paths() def to_dict(self): """ Returns a copy of the internal configuration as a dictionary. 
""" return dict(self._config) @property def workflows(self): """ Return the workflow folders """ return self._config.get('workflows') @property def data_store(self): """ Return the data store settings """ return self._config.get('store') @property def signal(self): """ Return the signal system settings """ return self._config.get('signal') @property def logging(self): """ Return the logging settings """ return self._config.get('logging') @property def celery(self): """ Return the celery settings """ return self._config.get('celery') @property def cli(self): """ Return the cli settings """ return self._config.get('cli') @property def extensions(self): """ Return the custom settings of extensions """ if 'extensions' not in self._config: raise ConfigFieldError( 'The extensions section is missing in the configuration') return self._config.get('extensions') @property def workflow_polling_time(self): """ Return the waiting time between status checks of the running dags (sec) """ if 'graph' not in self._config: raise ConfigFieldError('The graph section is missing in the configuration') return self._config.get('graph').get('workflow_polling_time') @property def dag_polling_time(self): """ Return the waiting time between status checks of the running tasks (sec) """ if 'graph' not in self._config: raise ConfigFieldError('The graph section is missing in the configuration') return self._config.get('graph').get('dag_polling_time') def set_to_default(self): """ Overwrite the configuration with the default configuration. """ self._config = yaml.safe_load(self.default()) def _update_from_file(self, filename): """ Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file. """ if os.path.exists(filename): try: with open(filename, 'r') as config_file: yaml_dict = yaml.safe_load(config_file.read()) if yaml_dict is not None: self._update_dict(self._config, yaml_dict) except IsADirectoryError: raise ConfigLoadError( 'The specified configuration file is a directory not a file') else: raise ConfigLoadError('The config file {} does not exist'.format(filename)) def _update_dict(self, to_dict, from_dict): """ Recursively merges the fields for two dictionaries. Args: to_dict (dict): The dictionary onto which the merge is executed. from_dict (dict): The dictionary merged into to_dict """ for key, value in from_dict.items(): if key in to_dict and isinstance(to_dict[key], dict) and \ isinstance(from_dict[key], dict): self._update_dict(to_dict[key], from_dict[key]) else: to_dict[key] = from_dict[key] def _update_python_paths(self): """ Append the workflow and libraries paths to the PYTHONPATH. """ for path in self._config['workflows'] + self._config['libraries']: if os.path.isdir(os.path.abspath(path)): if path not in sys.path: sys.path.append(path) else: raise ConfigLoadError( 'Workflow directory {} does not exist'.format(path)) @staticmethod def default(): """ Returns the default configuration. 
""" return ''' workflows: - ./examples libraries: [] celery: broker_url: redis://localhost:6379/0 result_backend: redis://localhost:6379/0 worker_concurrency: 8 result_expires: 0 worker_send_task_events: True worker_prefetch_multiplier: 1 signal: host: localhost port: 6379 password: null database: 0 polling_time: 0.5 store: host: localhost port: 27017 database: lightflow username: null password: null auth_source: admin auth_mechanism: null connect_timeout: 30000 graph: workflow_polling_time: 0.5 dag_polling_time: 0.5 cli: time_format: '%d/%m/%Y %H:%M:%S' extensions: {} logging: version: 1 disable_existing_loggers: false formatters: verbose: format: '[%(asctime)s][%(levelname)s] %(name)s %(filename)s:%(funcName)s:%(lineno)d | %(message)s' datefmt: '%d/%m/%Y %H:%M:%S' simple: (): 'colorlog.ColoredFormatter' format: '%(log_color)s[%(asctime)s][%(levelname)s] %(blue)s%(processName)s%(reset)s | %(message)s' datefmt: '%d/%m/%Y %H:%M:%S' handlers: console: class: logging.StreamHandler level: INFO formatter: simple loggers: celery: handlers: - console level: INFO root: handlers: - console level: INFO '''
class Config: ''' Hosts the global configuration. The configuration is read from a structured YAML file or a dictionary. The location of the file can either be specified directly, is given in the environment variable LIGHTFLOW_CONFIG_ENV, is looked for in the current execution directory or in the home directory of the user. ''' def __init__(self): pass @classmethod def from_file(cls, filename, *, strict=True): ''' Create a new Config object from a configuration file. Args: filename (str): The location and name of the configuration file. strict (bool): If true raises a ConfigLoadError when the configuration cannot be found. Returns: An instance of the Config class. Raises: ConfigLoadError: If the configuration cannot be found. ''' pass def load_from_file(self, filename=None, *, strict=True): ''' Load the configuration from a file. The location of the configuration file can either be specified directly in the parameter filename or is searched for in the following order: 1. In the environment variable given by LIGHTFLOW_CONFIG_ENV 2. In the current execution directory 3. In the user's home directory Args: filename (str): The location and name of the configuration file. strict (bool): If true raises a ConfigLoadError when the configuration cannot be found. Raises: ConfigLoadError: If the configuration cannot be found. ''' pass def load_from_dict(self, conf_dict=None): ''' Load the configuration from a dictionary. Args: conf_dict (dict): Dictionary with the configuration. ''' pass def to_dict(self): ''' Returns a copy of the internal configuration as a dictionary. ''' pass @property def workflows(self): ''' Return the workflow folders ''' pass @property def data_store(self): ''' Return the data store settings ''' pass @property def signal(self): ''' Return the signal system settings ''' pass @property def logging(self): ''' Return the logging settings ''' pass @property def celery(self): ''' Return the celery settings ''' pass @property def cli(self): ''' Return the cli settings ''' pass @property def extensions(self): ''' Return the custom settings of extensions ''' pass @property def workflow_polling_time(self): ''' Return the waiting time between status checks of the running dags (sec) ''' pass @property def dag_polling_time(self): ''' Return the waiting time between status checks of the running tasks (sec) ''' pass def set_to_default(self): ''' Overwrite the configuration with the default configuration. ''' pass def _update_from_file(self, filename): ''' Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file. ''' pass def _update_dict(self, to_dict, from_dict): ''' Recursively merges the fields for two dictionaries. Args: to_dict (dict): The dictionary onto which the merge is executed. from_dict (dict): The dictionary merged into to_dict ''' pass def _update_python_paths(self): ''' Append the workflow and libraries paths to the PYTHONPATH. ''' pass @staticmethod def default(): ''' Returns the default configuration. ''' pass
31
19
11
1
8
3
2
0.37
0
4
2
0
17
1
19
19
253
41
155
37
124
57
74
25
54
6
0
4
35
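A small standalone sketch of the recursive merge behaviour described for Config._update_dict above: nested dictionaries are merged key by key, while values from the incoming dictionary overwrite the defaults. The example values are hypothetical.

# Recursive merge mirroring the behaviour documented for Config._update_dict.
def update_dict(to_dict, from_dict):
    for key, value in from_dict.items():
        if key in to_dict and isinstance(to_dict[key], dict) and isinstance(value, dict):
            update_dict(to_dict[key], value)
        else:
            to_dict[key] = value

defaults = {'store': {'host': 'localhost', 'port': 27017}, 'workflows': ['./examples']}
update_dict(defaults, {'store': {'host': 'db.example.com'}})
print(defaults)
# {'store': {'host': 'db.example.com', 'port': 27017}, 'workflows': ['./examples']}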
7,947
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.RequestActionUnknown
class RequestActionUnknown(RuntimeError): pass
class RequestActionUnknown(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,948
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.LightflowException
class LightflowException(RuntimeError): """ Lightflow base class for all exceptions. """ def __init__(self, message=''): self.message = message def __str__(self): return self.message def __repr__(self): return "<LightflowException - {}>".format(self.message)
class LightflowException(RuntimeError): ''' Lightflow base class for all exceptions. ''' def __init__(self, message=''): pass def __str__(self): pass def __repr__(self): pass
4
1
2
0
2
0
1
0.14
1
0
0
2
3
1
3
14
10
2
7
5
3
1
7
5
3
1
4
0
3
7,949
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.JobStatInvalid
class JobStatInvalid(RuntimeError): pass
class JobStatInvalid(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,950
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.RequestFailed
class RequestFailed(RuntimeError): pass
class RequestFailed(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,951
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DataStoreNotConnected
class DataStoreNotConnected(RuntimeError): pass
class DataStoreNotConnected(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,952
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DirectedAcyclicGraphUndefined
class DirectedAcyclicGraphUndefined(RuntimeError): pass
class DirectedAcyclicGraphUndefined(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,953
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.DirectedAcyclicGraphInvalid
class DirectedAcyclicGraphInvalid(RuntimeError): def __init__(self, graph_name): """ Initialize the exception for invalid directed acyclic graphs. Args: graph_name (str): The name of the dag that is invalid. """ self.graph_name = graph_name
class DirectedAcyclicGraphInvalid(RuntimeError): def __init__(self, graph_name): ''' Initialize the exception for invalid directed acyclic graphs. Args: graph_name (str): The name of the dag that is invalid. ''' pass
2
1
7
1
2
4
1
1.33
1
0
0
0
1
1
1
12
8
1
3
3
1
4
3
3
1
1
4
0
1
7,954
AustralianSynchrotron/lightflow
AustralianSynchrotron_lightflow/lightflow/models/exceptions.py
lightflow.models.exceptions.JobEventTypeUnsupported
class JobEventTypeUnsupported(RuntimeError): pass
class JobEventTypeUnsupported(RuntimeError): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
11
2
0
2
1
1
0
2
1
1
0
4
0
0
7,955
Autodesk/aomi
Autodesk_aomi/tests/test_render.py
test_render.SecretKeyNameTest
class SecretKeyNameTest(unittest.TestCase): def getopt(self, op, args): args = parser_factory([op] + args)[1] return args def test_default(self): args = self.getopt('environment', ['foo']) assert secret_key_name('foo', 'baz', args) == 'foo_baz' def test_prefix(self): args = self.getopt('environment', ['foo', '--add-prefix', 'zoom',]) assert secret_key_name('foo', 'baz', args) == 'zoomfoo_baz' args = self.getopt('environment', ['foo', '--add-prefix', 'zoom', '--no-merge-path']) assert secret_key_name('foo', 'baz', args) == 'zoombaz' args = self.getopt('environment', ['foo', '--add-prefix', 'zoom', '--add-suffix', 'mooz', '--no-merge-path']) assert secret_key_name('foo', 'baz', args) == 'zoombazmooz' def test_suffix(self): opt = self.getopt('environment', ['foo', '--add-suffix', 'mooz']) assert secret_key_name('foo', 'baz', opt) == 'foo_bazmooz' opt = self.getopt('environment', ['foo', '--add-suffix', 'mooz', '--no-merge-path']) assert secret_key_name('foo', 'baz', opt) == 'bazmooz' opt = self.getopt('environment', ['foo', '--add-prefix', 'zoom', '--add-suffix', 'mooz']) assert secret_key_name('foo', 'baz', opt) == 'zoomfoo_bazmooz'
class SecretKeyNameTest(unittest.TestCase): def getopt(self, op, args): pass def test_default(self): pass def test_prefix(self): pass def test_suffix(self): pass
5
0
8
0
8
0
1
0
1
0
0
0
4
0
4
76
38
4
34
8
29
0
21
8
16
1
2
0
4
7,956
Autodesk/aomi
Autodesk_aomi/tests/test_render.py
test_render.TemplateTest
class TemplateTest(unittest.TestCase): def test_builtin(self): builtin_file = grok_template_file('builtin:foo') assert builtin_file.endswith('foo.j2') assert not builtin_file.startswith('builtin:') def test_normal(self): builtin_file = grok_template_file('/foo') assert builtin_file == '/foo'
class TemplateTest(unittest.TestCase): def test_builtin(self): pass def test_normal(self): pass
3
0
4
0
4
0
1
0
1
0
0
0
2
0
2
74
9
1
8
5
5
0
8
5
5
1
2
0
2
7,957
Autodesk/aomi
Autodesk_aomi/tests/test_validation.py
test_validation.SanitizeMount
class SanitizeMount(unittest.TestCase): def test_happy_path(self): assert aomi.validation.sanitize_mount('foo') == 'foo' assert aomi.validation.sanitize_mount('foo/bar') == 'foo/bar' def test_prefix(self): assert aomi.validation.sanitize_mount('/foo') == 'foo' assert aomi.validation.sanitize_mount('/foo/bar') == 'foo/bar' def test_suffix(self): assert aomi.validation.sanitize_mount('/foo') == 'foo' assert aomi.validation.sanitize_mount('/foo/bar') == 'foo/bar' def test_both(self): assert aomi.validation.sanitize_mount('/foo/') == 'foo' assert aomi.validation.sanitize_mount('/foo/bar/') == 'foo/bar'
class SanitizeMount(unittest.TestCase): def test_happy_path(self): pass def test_prefix(self): pass def test_suffix(self): pass def test_both(self): pass
5
0
3
0
3
0
1
0
1
0
0
0
4
0
4
76
16
3
13
5
8
0
13
5
8
1
2
0
4
7,958
Autodesk/aomi
Autodesk_aomi/tests/test_validation.py
test_validation.StringTests
class StringTests(unittest.TestCase): ghost_emoji = portable_b64decode('8J+Ruwo=') some_binary = portable_b64decode('uRo/OptvvkT790yaPjql5OItfFUBSM2tM42QJkPM7qvMTn4tQClPjB6mpdSFDtyzuqGVrMGaHRKv7XuzlZPpWGbVzlCjIvN0nOUiBXSQsockEJwCwIaiwm/xxWSE9+P2zWdqt1J/Iuwv6Rq60qpMRTqWNJD5dDzbw4VdDQhxzgK4zN2Er+JQQqQctsj1XuM8xJtzBQsozt5ZCJso4/jsUsWrFgHPp5nu4whuT7ZSgthsGz+NXo1f6v4njJ705ZMjLW0zdnkx/14E8qGJCsDs8pCkekDn+K4gTLfzZHga/du8xtN6e/X97K2BbdVC8Obz684wnqdHLWc+bNNso+5XFtQbFbK6vBtGtZNmBeiVBo594Zr5xRxFPSfOHIKz0jB4U5He7xgh2C7AFh2SCy4fW1fwC5XxQoz1pRSiFTRbUr/dMHMn0ZaspVYUNPdZccM4xj8ip5k4fXVRTKFF1qEiFGohcfLdabCBXAkckOmGogdN0swOpoiNEohYksW0bkof89q1aRJl6tM9E2spH62XZXDmQFHIdxFFHP6zAl2t7zGB2vxDCpLgQg3l8RytryMfDR7MXXXy2kbhtFpIl45gFl/8u+aOc7fP4dLxacCbJNz3cO3iMXIPytwiaq5HJbgQ6ZgeGjZBniTCRLwRpOv3l3GRsLstdRJSk2KP+kwY9Tk=') def test_is_unicode(self): assert aomi.validation.is_unicode_string("foo") == None assert aomi.validation.is_unicode_string("70758F21-946C-4C14-AD67-53DDCA5C9F4B") == None assert aomi.validation.is_unicode_string(self.ghost_emoji) == None with self.assertRaises(aomi.exceptions.Validation): aomi.validation.is_unicode_string(self.some_binary)
class StringTests(unittest.TestCase): def test_is_unicode(self): pass
2
0
6
0
6
0
1
0
1
1
1
0
1
0
1
73
10
1
9
4
7
0
9
4
7
1
2
1
1
7,959
Autodesk/aomi
Autodesk_aomi/tests/test_validation.py
test_validation.UnicodeTests
class UnicodeTests(unittest.TestCase): def test_is_unicode(self): assert aomi.validation.is_unicode(str("test a thing"))
class UnicodeTests(unittest.TestCase): def test_is_unicode(self): pass
2
0
2
0
2
0
1
0
1
1
0
0
1
0
1
73
3
0
3
2
1
0
3
2
1
1
2
0
1
7,960
Autodesk/aomi
Autodesk_aomi/tests/test_helpers.py
test_helpers.FilePathTest
class FilePathTest(unittest.TestCase): def test_subdir_happy_path(self): assert aomi.helpers.subdir_path("/a/b/c", "/a/b/.gitignore") == "c" assert aomi.helpers.subdir_path("/a/b/c/d", "/a/b/.gitignore") == "c/d" def test_subdir_missing(self): assert aomi.helpers.subdir_path("/a/b/c", "c/d") is None def test_subdir_external(self): assert aomi.helpers.subdir_path("/a/b/c", "/d/e") is None
class FilePathTest(unittest.TestCase): def test_subdir_happy_path(self): pass def test_subdir_missing(self): pass def test_subdir_external(self): pass
4
0
2
0
2
0
1
0
1
0
0
0
3
0
3
75
10
2
8
4
4
0
8
4
4
1
2
0
3
7,961
Autodesk/aomi
Autodesk_aomi/tests/test_vault.py
test_vault.HelperTest
class HelperTest(unittest.TestCase): def test_seconds_to_seconds(self): assert grok_seconds('1s') == 1 assert grok_seconds('60s') == 60 assert grok_seconds('120s') == 120 def test_minutes_to_seconds(self): assert grok_seconds('1m') == 60 assert grok_seconds('60m') == 3600 def test_hours_to_seconds(self): assert grok_seconds('1h') == 3600 assert grok_seconds('24h') == 86400 def test_is_aws(self): assert is_aws({'access_key': True, 'secret_key': True}) assert is_aws({'access_key': True, 'secret_key': True, 'security_token': True}) def test_is_not_aws(self): assert not is_aws({'aaa': True})
class HelperTest(unittest.TestCase): def test_seconds_to_seconds(self): pass def test_minutes_to_seconds(self): pass def test_hours_to_seconds(self): pass def test_is_aws(self): pass def test_is_not_aws(self): pass
6
0
3
0
3
0
1
0
1
0
0
0
5
0
5
77
20
4
16
6
10
0
16
6
10
1
2
0
5
7,962
Autodesk/aomi
Autodesk_aomi/tests/test_helpers.py
test_helpers.CliHashTest
class CliHashTest(unittest.TestCase): def test_happy_path(self): assert aomi.helpers.cli_hash(["foo=bar"]) == {'foo': 'bar'} assert aomi.helpers.cli_hash(["foo=bar", "baz=bam"]) == {'foo': 'bar', 'baz': 'bam'}
class CliHashTest(unittest.TestCase): def test_happy_path(self): pass
2
0
3
0
3
0
1
0
1
0
0
0
1
0
1
73
4
0
4
2
2
0
4
2
2
1
2
0
1
7,963
Autodesk/aomi
Autodesk_aomi/tests/test_cli.py
test_cli.OpParserTest
class OpParserTest(unittest.TestCase): def enabled_options(self, operations, option): for op in operations: self.assertTrue(option.replace('-', '_') in aomi.cli.parser_factory(op)[1].__dict__) def disabled_options(self, operations, option): for op in operations: self.assertFalse(option.replace('-', '_') in aomi.cli.parser_factory(op)[1].__dict__) def test_secretfile_option(self): self.enabled_options([['seed'], ['freeze', 'fake'], ['thaw', 'fake']], 'secretfile') self.disabled_options([['environment', 'foo'], ['extract_file', 'foo', 'bar'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['set_password', 'foo'], ['token']], 'secretfiles') def test_policies_option(self): self.enabled_options([['seed'], ['thaw', 'foo'], ['freeze', 'foo']], 'policies') self.disabled_options([['environment', 'foo'], ['extract_file', 'foo', 'bar'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['set_password', 'foo'], ['token']], 'policies') def test_secrets_option(self): self.enabled_options([['seed'], ['freeze', 'foo'], ['thaw', 'foo']], 'secrets') self.disabled_options([['environment', 'foo'], ['extract_file', 'foo', 'bar'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['set_password', 'foo'], ['token']], 'secrets') def test_mount_only_option(self): self.enabled_options([['seed']], 'mount-only') self.disabled_options([['environment', 'foo'], ['extract_file', 'foo', 'bar'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['set_password', 'foo'], ['token'], ['freeze', 'foo'], ['thaw', 'foo']], 'mount-only') def test_prefix_option(self): self.enabled_options([['environment', 'foo']], 'prefix') self.disabled_options([['seed'], ['extract_file', 'foo', 'bar'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'prefix') def test_add_prefix_option(self): self.enabled_options([['environment', 'foo'], ['template', 'foo', 'bar', 'baz']], 'add-prefix') self.disabled_options([['seed'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'add-prefix') def test_suffix_option(self): self.enabled_options([['environment', 'foo'], ['template', 'foo', 'bar', 'baz']], 'add-suffix') self.disabled_options([['seed'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'add-suffix') def test_merge_path_option(self): self.enabled_options([['environment', 'foo'], ['template', 'foo', 'bar', 'baz']], 'merge-path') self.disabled_options([['seed'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'merge-path') def test_verbose_option(self): self.enabled_options([['environment', 'foo'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['seed'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'verbose') def test_metadata_option(self): self.enabled_options([['environment', 'foo'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['seed'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'metadata') def test_lease_option(self): self.enabled_options([['environment', 'foo'], ['aws_environment', 'foo'], ['template', 'foo', 'bar', 'baz'], ['seed'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'lease') def test_export_option(self): 
self.enabled_options([['environment', 'foo'], ['aws_environment', 'foo']], 'export') self.disabled_options([['template', 'foo', 'bar', 'baz'], ['seed'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['freeze', 'foo'], ['thaw', 'foo'], ['token']], 'export') def test_extra_vars_option(self): self.enabled_options([['template', 'foo', 'bar', 'baz'], ['freeze', 'foo'], ['thaw', 'foo'], ['seed']], 'extra-vars') self.disabled_options([['environment', 'foo'], ['aws_environment', 'foo'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['token']], 'extra-vars') def test_extra_vars_file_option(self): self.enabled_options([['template', 'foo', 'bar', 'baz'], ['freeze', 'foo'], ['thaw', 'foo'], ['seed']], 'extra-vars-file') self.disabled_options([['environment', 'foo'], ['aws_environment', 'foo'], ['extract_file', 'foo', 'bar'], ['set_password', 'foo'], ['token']], 'extra-vars-file')
class OpParserTest(unittest.TestCase): def enabled_options(self, operations, option): pass def disabled_options(self, operations, option): pass def test_secretfile_option(self): pass def test_policies_option(self): pass def test_secrets_option(self): pass def test_mount_only_option(self): pass def test_prefix_option(self): pass def test_add_prefix_option(self): pass def test_suffix_option(self): pass def test_merge_path_option(self): pass def test_verbose_option(self): pass def test_metadata_option(self): pass def test_lease_option(self): pass def test_export_option(self): pass def test_extra_vars_option(self): pass def test_extra_vars_file_option(self): pass
17
0
9
0
9
0
1
0
1
0
0
0
16
0
16
88
167
23
144
19
127
0
46
19
29
2
2
1
18
7,964
Autodesk/aomi
Autodesk_aomi/aomi/model/ssh.py
aomi.model.ssh.SSHRole
class SSHRole(Secret): """SSH Credential Backend""" resource_key = 'ssh_creds' required_fields = ['key_type'] backend = 'ssh' def __init__(self, obj, opt): super(SSHRole, self).__init__(obj, opt) self.mount = sanitize_mount(obj.get('mount', 'ssh')) a_name = obj.get('name', obj['ssh_creds']) self.path = "%s/roles/%s" % (self.mount, a_name) self._obj = { 'key_type': obj['key_type'] } if 'cidr_list' in obj: self._obj['cidr_list'] = ','.join(obj['cidr_list']) if 'default_user' in obj: self._obj['default_user'] = obj['default_user'] self.tunable(obj)
class SSHRole(Secret): '''SSH Credential Backend''' def __init__(self, obj, opt): pass
2
1
15
2
13
0
3
0.06
1
1
0
0
1
3
1
22
21
3
17
9
15
1
15
9
13
3
3
1
3
7,965
Autodesk/aomi
Autodesk_aomi/aomi/model/resource.py
aomi.model.resource.Secret
class Secret(Resource): """Vault Secrets These Vault resources will have some kind of secret backend underneath them. Seems to work with generic and AWS""" config_key = 'secrets'
class Secret(Resource): '''Vault Secrets These Vault resources will have some kind of secret backend underneath them. Seems to work with generic and AWS''' pass
1
1
0
0
0
0
0
1.5
1
0
0
3
0
0
0
21
5
0
2
2
1
3
2
2
1
0
2
0
0
7,966
Autodesk/aomi
Autodesk_aomi/tests/test_validation.py
test_validation.VaultPathTest
class VaultPathTest(unittest.TestCase): def setUp(self): self.args = parser_factory(['seed'])[1] def test_happy_path(self): self.assertTrue(aomi.validation.specific_path_check('foo/bar', self.args)) self.assertTrue(aomi.validation.specific_path_check('foo/bam', self.args)) def test_include(self): self.args.include = ['foo/bar'] self.assertTrue(aomi.validation.specific_path_check('foo/bar', self.args)) self.assertFalse(aomi.validation.specific_path_check('foo/bam', self.args)) def test_exclude(self): self.args.exclude = ['foo/bar'] self.assertFalse(aomi.validation.specific_path_check('foo/bar', self.args)) self.assertTrue(aomi.validation.specific_path_check('foo/bam', self.args)) def test_both(self): self.args.exclude = ['foo/bar'] self.args.include = ['foo/bar'] self.assertFalse(aomi.validation.specific_path_check('foo/bar', self.args)) def test_include_multiple(self): self.args.include = ['foo/bar', 'foo/baz'] self.assertTrue(aomi.validation.specific_path_check('foo/bar', self.args)) self.assertTrue(aomi.validation.specific_path_check('foo/baz', self.args)) def test_exclude_multiple(self): self.args.exclude = ['foo/bar', 'foo/baz'] self.assertFalse(aomi.validation.specific_path_check('foo/bar', self.args)) self.assertFalse(aomi.validation.specific_path_check('foo/baz', self.args)) def test_both(self): self.args.exclude = ['foo/bar', 'foo/baz'] self.args.include = ['foo/bar', 'foo/baz'] self.assertFalse(aomi.validation.specific_path_check('foo/bar', self.args)) self.assertFalse(aomi.validation.specific_path_check('foo/baz', self.args)) self.assertFalse(aomi.validation.specific_path_check('foo/bam', self.args)) def test_complex(self): self.args.include = ['foo/bar', 'foo/bam'] self.args.exclude = ['foo/baz', 'foo/bam'] self.assertTrue(aomi.validation.specific_path_check('foo/bar', self.args)) self.assertFalse(aomi.validation.specific_path_check('foo/bom', self.args)) self.assertFalse(aomi.validation.specific_path_check('foo/bam', self.args)) self.assertFalse(aomi.validation.specific_path_check('foo/baz', self.args))
class VaultPathTest(unittest.TestCase): def setUp(self): pass def test_happy_path(self): pass def test_include(self): pass def test_exclude(self): pass def test_both(self): pass def test_include_multiple(self): pass def test_exclude_multiple(self): pass def test_both(self): pass def test_complex(self): pass
10
0
4
0
4
0
1
0
1
0
0
0
9
1
9
81
47
8
39
11
29
0
39
11
29
1
2
0
9
7,967
Autodesk/aomi
Autodesk_aomi/aomi/vault.py
aomi.vault.Client
class Client(hvac.Client): """Our Vault Client Wrapper This class will pass the existing hvac bits through. When interacting with cubbyhole paths, it will use the non operational token in order to preserve access.""" # dat hvac tho # pylint: disable=too-many-arguments def __init__(self, _url=None, token=None, _cert=None, _verify=True, _timeout=30, _proxies=None, _allow_redirects=True, _session=None): self.version = None self.vault_addr = os.environ.get('VAULT_ADDR') if not self.vault_addr: raise aomi.exceptions.AomiError('VAULT_ADDR is undefined or empty') if not self.vault_addr.startswith("http"): raise aomi.exceptions.AomiError('VAULT_ADDR must be a URL') ssl_verify = True if 'VAULT_SKIP_VERIFY' in os.environ: if os.environ['VAULT_SKIP_VERIFY'] == '1': import urllib3 urllib3.disable_warnings() ssl_verify = False self.initial_token = None self.operational_token = None session = requests.Session() retries = Retry(total=5, backoff_factor=0.5) adapter = HTTPAdapter(max_retries=retries) session.mount('https://', adapter) session.mount('http://', adapter) super(Client, self).__init__(url=self.vault_addr, verify=ssl_verify, session=session) def server_version(self): """Attempts to determine the version of Vault that a server is running. Some actions will change on older Vault deployments.""" health_url = "%s/v1/sys/health" % self.vault_addr resp = self.session.request('get', health_url, **self._kwargs) if resp.status_code == 200 or resp.status_code == 429: blob = resp.json() if 'version' in blob: return blob['version'] else: raise aomi.exceptions.VaultProblem('Health check failed') return None def connect(self, opt): """This sets up the tokens we expect to see in a way that hvac also expects.""" if not self._kwargs['verify']: LOG.warning('Skipping SSL Validation!') self.version = self.server_version() self.token = self.init_token() my_token = self.lookup_token() if not my_token or 'data' not in my_token: raise aomi.exceptions.AomiCredentials('initial token') display_name = my_token['data']['display_name'] vsn_string = "" if self.version: vsn_string = ", v%s" % self.version else: LOG.warning("Unable to deterine Vault version. 
Not all " "functionality is supported") LOG.info("Connected to %s as %s%s", self._url, display_name, vsn_string) if opt.reuse_token: LOG.debug("Not creating operational token") self.initial_token = self.token self.operational_token = self.token else: self.initial_token = self.token self.operational_token = self.op_token(display_name, opt) if not self.is_authenticated(): raise aomi.exceptions.AomiCredentials('operational token') self.token = self.operational_token return self def init_token(self): """Generate our first token based on workstation configuration""" app_filename = appid_file() token_filename = token_file() approle_filename = approle_file() token = None if 'VAULT_ROLE_ID' in os.environ and \ 'VAULT_SECRET_ID' in os.environ and \ os.environ['VAULT_ROLE_ID'] and os.environ['VAULT_SECRET_ID']: token = approle_token(self, os.environ['VAULT_ROLE_ID'], os.environ['VAULT_SECRET_ID']) LOG.debug("Token derived from VAULT_ROLE_ID and VAULT_SECRET_ID") elif 'VAULT_TOKEN' in os.environ and os.environ['VAULT_TOKEN']: LOG.debug('Token derived from VAULT_TOKEN environment variable') token = os.environ['VAULT_TOKEN'].strip() elif 'VAULT_USER_ID' in os.environ and \ 'VAULT_APP_ID' in os.environ and \ os.environ['VAULT_USER_ID'] and os.environ['VAULT_APP_ID']: LOG.debug("Token derived from VAULT_APP_ID and VAULT_USER_ID") token = app_token(self, os.environ['VAULT_APP_ID'].strip(), os.environ['VAULT_USER_ID'].strip()) elif approle_filename: creds = yaml.safe_load(open(approle_filename).read().strip()) if 'role_id' in creds and 'secret_id' in creds: LOG.debug("Token derived from approle file") token = approle_token(self, creds['role_id'], creds['secret_id']) elif token_filename: LOG.debug("Token derived from %s", token_filename) try: token = open(token_filename, 'r').read().strip() except IOError as os_exception: if os_exception.errno == 21: raise aomi.exceptions.AomiFile('Bad Vault token file') raise elif app_filename: token = yaml.safe_load(open(app_filename).read().strip()) if 'app_id' in token and 'user_id' in token: LOG.debug("Token derived from %s", app_filename) token = app_token(self, token['app_id'], token['user_id']) else: raise aomi.exceptions.AomiCredentials('unknown method') return token def op_token(self, display_name, opt): """Return a properly annotated token for our use. This token will be revoked at the end of the session. 
The token will have some decent amounts of metadata tho.""" args = { 'lease': opt.lease, 'display_name': display_name, 'meta': token_meta(opt) } try: token = self.create_token(**args) except (hvac.exceptions.InvalidRequest, hvac.exceptions.Forbidden) as vault_exception: if vault_exception.errors[0] == 'permission denied': emsg = "Permission denied creating operational token" raise aomi.exceptions.AomiCredentials(emsg) else: raise LOG.debug("Created operational token with lease of %s", opt.lease) return token['auth']['client_token'] def read(self, path, wrap_ttl=None): """Wrap the hvac read call, using the right token for cubbyhole interactions.""" path = sanitize_mount(path) if path.startswith('cubbyhole'): self.token = self.initial_token val = super(Client, self).read(path, wrap_ttl) self.token = self.operational_token return val return super(Client, self).read(path, wrap_ttl) def write(self, path, wrap_ttl=None, **kwargs): """Wrap the hvac write call, using the right token for cubbyhole interactions.""" path = sanitize_mount(path) val = None if path.startswith('cubbyhole'): self.token = self.initial_token val = super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs) self.token = self.operational_token else: super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs) return val def delete(self, path): """Wrap the hvac delete call, using the right token for cubbyhole interactions.""" path = sanitize_mount(path) val = None if path.startswith('cubbyhole'): self.token = self.initial_token val = super(Client, self).delete(path) self.token = self.operational_token else: super(Client, self).delete(path) return val
class Client(hvac.Client): '''Our Vault Client Wrapper This class will pass the existing hvac bits through. When interacting with cubbyhole paths, it will use the non operational token in order to preserve access.''' def __init__(self, _url=None, token=None, _cert=None, _verify=True, _timeout=30, _proxies=None, _allow_redirects=True, _session=None): pass def server_version(self): '''Attempts to determine the version of Vault that a server is running. Some actions will change on older Vault deployments.''' pass def connect(self, opt): '''This sets up the tokens we expect to see in a way that hvac also expects.''' pass def init_token(self): '''Generate our first token based on workstation configuration''' pass def op_token(self, display_name, opt): '''Return a properly annotated token for our use. This token will be revoked at the end of the session. The token will have some decent amounts of metadata tho.''' pass def read(self, path, wrap_ttl=None): '''Wrap the hvac read call, using the right token for cubbyhole interactions.''' pass def write(self, path, wrap_ttl=None, **kwargs): '''Wrap the hvac write call, using the right token for cubbyhole interactions.''' pass def delete(self, path): '''Wrap the hvac delete call, using the right token for cubbyhole interactions.''' pass
9
8
24
2
20
2
4
0.13
1
8
4
0
8
5
8
8
204
24
159
40
147
21
121
36
111
11
1
3
34
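A hedged sketch of the retrying HTTP session that Client.__init__ above builds before handing it to hvac; the Vault address shown in the comment is a placeholder, not a real endpoint.

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retries = Retry(total=5, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('https://', adapter)
session.mount('http://', adapter)

# every request made through this session now retries transient failures, e.g.
# resp = session.get('https://vault.example.com/v1/sys/health', timeout=5)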
7,968
Autodesk/aomi
Autodesk_aomi/aomi/model/generic.py
aomi.model.generic.Generic
class Generic(Secret): """Generic Secrets""" backend = 'generic' def __init__(self, obj, opt): super(Generic, self).__init__(obj, opt) self.mount = sanitize_mount(obj['mount']) self.path = "%s/%s" % (self.mount, obj['path'])
class Generic(Secret): '''Generic Secrets''' def __init__(self, obj, opt): pass
2
1
4
0
4
0
1
0.17
1
1
0
3
1
2
1
22
8
1
6
5
4
1
6
5
4
1
3
0
1
7,969
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.AppRoleSecret
class AppRoleSecret(Resource): """Approle Secret""" child = True def __str__(self): return "AppRole Secret %s %s" % (self.role_name, self.secret_name) def __init__(self, obj, opt): self.role_name = obj['role_name'] self.secret_name = obj['name'] self.filename = obj['filename'] self.opt = opt super(AppRoleSecret, self).__init__(obj, opt) def diff(self, obj=None): if self.existing and 'secret_id_accessor' in self.existing: return NOOP return ADD def obj(self): filename = hard_path(self.filename, self.opt.secrets) aomi.validation.secret_file(filename) handle = open(filename, 'r') s_obj = { 'role_name': self.role_name, 'secret_name': self.secret_name, 'secret_id': handle.read().strip() } handle.close() return s_obj def secrets(self): return [self.filename] @wrap_vault("writing") def write(self, client): s_obj = self.obj() secret_id = s_obj['secret_id'] del s_obj['secret_id'] client.create_role_custom_secret_id(self.role_name, secret_id, s_obj) @wrap_vault("reading") def read(self, client): try: return client.get_role_secret_id(self.role_name, self.obj()['secret_id']) except hvac.exceptions.InvalidPath: return None except hvac.exceptions.InternalServerError as vault_excep: e_msg = vault_excep.errors[0] if "role %s does not exist" % self.role_name in e_msg: return None raise except ValueError as an_excep: if str(an_excep).startswith('No JSON object'): return None raise @wrap_vault("deleting") def delete(self, client): client.delete_role_secret_id(self.role_name, self.obj()['secret_id'])
class AppRoleSecret(Resource): '''Approle Secret''' def __str__(self): pass def __init__(self, obj, opt): pass def diff(self, obj=None): pass def obj(self): pass def secrets(self): pass @wrap_vault("writing") def write(self, client): pass @wrap_vault("reading") def read(self, client): pass @wrap_vault("deleting") def delete(self, client): pass
12
1
7
0
6
0
2
0.02
1
3
0
0
8
4
8
29
67
11
55
25
43
1
44
20
35
6
2
2
14
7,970
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.LDAPUser
class LDAPUser(Resource): """LDAP User Membership""" required_fields = ['user'] config_key = 'ldap_users' def __init__(self, obj, opt): super(LDAPUser, self).__init__(obj, opt) self.path = sanitize_mount("auth/%s/users/%s" % (obj.get('mount', 'ldap'), obj['user'])) self._obj = {} map_val(self._obj, obj, 'groups', []) map_val(self._obj, obj, 'policies', []) def obj(self): return { 'groups': ','.join(sorted(self._obj.get('groups', []))), 'policies': ','.join(sorted(self._obj.get('policies', []))) }
class LDAPUser(Resource): '''LDAP User Membership''' def __init__(self, obj, opt): pass def obj(self): pass
3
1
6
0
6
0
1
0.07
1
1
0
0
2
2
2
23
18
2
15
7
12
1
11
7
8
1
2
0
2
7,971
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.AppRole
class AppRole(Auth): """AppRole""" required_fields = ['name', 'policies'] config_key = 'approles' def resources(self): return [self] + self.secret_ids def __init__(self, obj, opt): super(AppRole, self).__init__('approle', obj, opt) self.app_name = obj['name'] self.mount = 'approle' self.path = "%s/role/%s" % (self.mount, self.app_name) self.secret_ids = [] self.tunable(obj) policies = obj['policies'] # HCV seems to always add this in anyway. Having this implicit # at our end makes the diff'ing easier. if 'default' not in policies: policies.insert(0, 'default') role_obj = { 'policies': ','.join(sorted(policies)) } map_val(role_obj, obj, 'bound_cidr_list', '', 'cidr_list') map_val(role_obj, obj, 'secret_id_num_uses', 0, 'secret_uses') map_val(role_obj, obj, 'secret_id_ttl', 0, 'secret_ttl') map_val(role_obj, obj, 'period', 0) map_val(role_obj, obj, 'token_max_ttl', 0) map_val(role_obj, obj, 'token_ttl', 0) map_val(role_obj, obj, 'bind_secret_id', True) map_val(role_obj, obj, 'token_num_uses', 0) self._obj = role_obj if 'preset' in obj: self.presets(obj['preset'], opt) def presets(self, presets, opt): """Will create representational objects for any preset (push) based AppRole Secrets.""" for preset in presets: secret_obj = dict(preset) secret_obj['role_name'] = self.app_name self.secret_ids.append(AppRoleSecret(secret_obj, opt)) def diff(self, obj=None): obj = dict(self.obj()) obj['policies'] = obj['policies'].split(',') obj['policies'] = sorted(obj['policies']) return super(AppRole, self).diff(obj) @wrap_vault("writing") def write(self, client): client.create_role(self.app_name, **self.obj()) @wrap_vault("reading") def read(self, client): try: return client.get_role(self.app_name) except hvac.exceptions.InvalidPath: return None @wrap_vault("deleting") def delete(self, client): client.delete_role(self.app_name)
class AppRole(Auth): '''AppRole''' def resources(self): pass def __init__(self, obj, opt): pass def presets(self, presets, opt): '''Will create representational objects for any preset (push) based AppRole Secrets.''' pass def diff(self, obj=None): pass @wrap_vault("writing") def write(self, client): pass @wrap_vault("reading") def read(self, client): pass @wrap_vault("deleting") def delete(self, client): pass
11
2
7
0
6
1
2
0.1
1
3
1
0
7
5
7
29
64
8
51
22
40
5
46
19
38
3
3
1
11
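A brief sketch of the policy normalisation performed in AppRole.__init__ above, where 'default' is always included and the policy list is stored as a sorted, comma-joined string; the policy names are made up.

# 'default' is added implicitly so that diffs against Vault stay stable.
policies = ['admins', 'readonly']
if 'default' not in policies:
    policies.insert(0, 'default')
print(','.join(sorted(policies)))  # admins,default,readonly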
7,972
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.LDAP
class LDAP(Auth): """LDAP Authentication""" required_fields = ['url'] config_key = 'ldap_auth' def __init__(self, obj, opt): super(LDAP, self).__init__('ldap', obj, opt) auth_obj = { 'url': obj['url'] } self.mount = obj.get('mount', 'ldap') self.path = sanitize_mount("auth/%s/config" % self.mount) self.secret = obj.get('secrets') map_val(auth_obj, obj, 'starttls', False) map_val(auth_obj, obj, 'insecure_tls', False) map_val(auth_obj, obj, 'discoverdn') map_val(auth_obj, obj, 'userdn') map_val(auth_obj, obj, 'userattr') map_val(auth_obj, obj, 'deny_null_bind', True) map_val(auth_obj, obj, 'upndomain') map_val(auth_obj, obj, 'groupfilter') map_val(auth_obj, obj, 'groupdn') map_val(auth_obj, obj, 'groupattr') map_val(auth_obj, obj, 'binddn') map_val(auth_obj, obj, 'tls_max_version') map_val(auth_obj, obj, 'tls_min_version') self._obj = auth_obj self.tunable(obj) def secrets(self): if self.secret: return [self.secret] return [] def obj(self): ldap_obj = self._obj if self.secret: filename = hard_path(self.secret, self.opt.secrets) secret_file(filename) s_obj = load_var_file(filename, load_vars(self.opt)) for obj_k, obj_v in iteritems(s_obj): ldap_obj[obj_k] = obj_v return ldap_obj
class LDAP(Auth):
    '''LDAP Authentication'''
    def __init__(self, obj, opt):
        pass
    def secrets(self):
        pass
    def obj(self):
        pass
4
1
13
1
12
0
2
0.03
1
1
0
0
3
4
3
25
45
5
39
15
35
1
37
15
33
3
3
2
6
7,973
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.DUOAccess
class DUOAccess(Resource):
    """DUO API Access Credentials"""
    child = True

    def export(self, _directory):
        pass

    def secrets(self):
        return [self.secret]

    def obj(self):
        filename = hard_path(self.filename, self.opt.secrets)
        aomi.validation.secret_file(filename)
        obj = yaml.safe_load(open(filename).read())
        return {
            'host': self.host,
            'skey': obj['secret'],
            'ikey': obj['key']
        }

    def diff(self, obj=None):
        return Resource.diff_write_only(self)

    def __init__(self, duo, secret, opt):
        s_obj = {
            'state': 'present'
        }
        if not duo.present:
            s_obj['state'] = 'absent'

        super(DUOAccess, self).__init__(s_obj, opt)
        self.backend = duo.backend
        self.path = "auth/%s/duo/access" % self.backend
        self.filename = secret
        self.secret = secret
        self.host = duo.host

    def fetch(self, vault_client):
        mfa_config = vault_client.read("auth/%s/mfa_config" % self.backend)
        self.existing = mfa_config and mfa_config['data']['type'] == 'duo'
class DUOAccess(Resource):
    '''DUO API Access Credentials'''
    def export(self, _directory):
        pass
    def secrets(self):
        pass
    def obj(self):
        pass
    def diff(self, obj=None):
        pass
    def __init__(self, duo, secret, opt):
        pass
    def fetch(self, vault_client):
        pass
7
1
5
0
5
0
1
0.06
1
1
0
0
6
6
6
27
41
7
32
18
25
2
26
18
19
2
2
1
7
7,974
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.DUO
class DUO(Auth):
    """DUO MFA Authentication Backend Decorator"""
    required_fields = ['host', 'creds', 'backend']
    resource = 'DUO MFA'
    config_key = 'duo'

    def resources(self):
        return [self, self.access]

    def __init__(self, obj, opt):
        super(DUO, self).__init__(obj['backend'], obj, opt)
        self.path = "auth/%s/mfa_config" % self.backend
        self.host = obj['host']
        self.mount = self.backend
        self._obj = {'type': 'duo'}
        self.access = DUOAccess(self, obj['creds'], opt)
class DUO(Auth):
    '''DUO MFA Authentication Backend Decorator'''
    def resources(self):
        pass
    def __init__(self, obj, opt):
        pass
3
1
5
0
5
0
1
0.15
1
2
1
0
2
5
2
24
17
2
13
11
10
2
13
11
10
1
3
0
2
7,975
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.AppUser
class AppUser(Resource):
    """App User"""
    required_fields = ['id']
    child = True

    def __init__(self, app, obj, opt):
        super(AppUser, self).__init__(obj, opt)
        self.path = "auth/app-id/map/user-id/%s" % obj['id']
        self._obj = {
            'value': app.app_name
        }
        if 'cidr' in obj:
            self._obj['cidr'] = obj['cidr']
class AppUser(Resource):
    '''App User'''
    def __init__(self, app, obj, opt):
        pass
2
1
8
0
8
0
2
0.09
1
1
0
0
1
2
1
22
13
1
11
6
9
1
9
6
7
2
2
1
2
7,976
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.VaultProblem
class VaultProblem(AomiError):
    """Something is wrong with Vault itself. Network, sealed,
    but it's at the point where we can't even validate if the
    data is there"""
    catmsg = 'Vault Problem'
class VaultProblem(AomiError):
    '''Something is wrong with Vault itself. Network, sealed,
    but it's at the point where we can't even validate if the
    data is there'''
    pass
1
1
0
0
0
0
0
1.5
1
0
0
0
0
0
0
11
5
0
2
2
1
3
2
2
1
0
4
0
0
7,977
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.VaultData
class VaultData(AomiError):
    """Something is wrong with data received from Vault. Usually
    indicates aomi trying to interact with something manually created"""
    catmsg = 'Unexpected Vault Data Woe'
class VaultData(AomiError):
    '''Something is wrong with data received from Vault. Usually
    indicates aomi trying to interact with something manually created'''
    pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
11
4
0
2
2
1
2
2
2
1
0
4
0
0
7,978
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.UserPass
class UserPass(Auth):
    """UserPass Authentication Backend"""
    config_key = 'userpass'
    no_resource = True

    def __init__(self, obj, opt):
        super(UserPass, self).__init__('userpass', obj, opt)
        self.tunable(obj)
        self.mount = obj.get('path', 'userpass')
        self.path = "auth/%s" % self.mount
class UserPass(Auth):
    '''UserPass Authentication Backend'''
    def __init__(self, obj, opt):
        pass
2
1
5
0
5
0
1
0.13
1
1
0
0
1
2
1
23
10
1
8
6
6
1
8
6
6
1
3
0
1
7,979
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.VaultConstraint
class VaultConstraint(AomiError):
    """Vault is imposing constraints on us. Permission or pathing generally"""
    catmsg = 'A Vault Constraint Exists'
class VaultConstraint(AomiError):
    '''Vault is imposing constraints on us. Permission or pathing generally'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
2
1
1
2
2
1
0
4
0
0
7,980
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.KeybaseAPI
class KeybaseAPI(AomiError):
    """Covers errors related to the keybase API"""
    catmsg = 'Something wrong with Keybase integration'
class KeybaseAPI(AomiError):
    '''Covers errors related to the keybase API'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
2
1
1
2
2
1
0
4
0
0
7,981
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.IceFile
class IceFile(AomiError):
    """Something is wrong with an aomi generated icefile"""
    catmsg = 'Corrupt Icefile'
class IceFile(AomiError):
    '''Something is wrong with an aomi generated icefile'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
2
1
1
2
2
1
0
4
0
0
7,982
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.GPG
class GPG(AomiError):
    """Covers errors related to our GPG wrapper"""
    catmsg = 'Something went wrong interacting with GPG'
class GPG(AomiError):
    '''Covers errors related to our GPG wrapper'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
2
1
1
2
2
1
0
4
0
0
7,983
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.AomiFile
class AomiFile(AomiError):
    """Something is wrong with a file on the local filesystem"""
    catmsg = 'Problem with a local file'
class AomiFile(AomiError):
    '''Something is wrong with a file on the local filesystem'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
2
1
1
2
2
1
0
4
0
0
7,984
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.AomiError
class AomiError(Exception):
    """Our generic exception. Builds up an appropriate error
    message for representation to the user"""
    catmsg = None

    def __init__(self, message=None):
        msg = None
        if self.catmsg is not None and message is not None:
            msg = "%s - %s" % (self.catmsg, message)
        elif self.catmsg is not None:
            msg = self.catmsg
        elif message is not None:
            msg = message

        if msg is not None:
            super(AomiError, self).__init__(msg)
        else:
            super(AomiError, self).__init__()
class AomiError(Exception):
    '''Our generic exception. Builds up an appropriate error
    message for representation to the user'''
    def __init__(self, message=None):
        pass
2
1
13
1
12
0
5
0.14
1
1
0
11
1
0
1
11
18
2
14
4
12
2
11
4
9
5
3
1
5
7,985
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.AomiData
class AomiData(AomiError):
    """Some kind of aomi specific data is invalid"""
    catmsg = 'Invalid aomi data'
class AomiData(AomiError):
    '''Some kind of aomi specific data is invalid'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
2
1
1
2
2
1
0
4
0
0
7,986
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.AomiCredentials
class AomiCredentials(AomiError):
    """This exception is used for representing errors related to
    authenticating against a running Vault server"""
    catmsg = 'Something wrong with Vault credentials'
class AomiCredentials(AomiError):
    '''This exception is used for representing errors related to
    authenticating against a running Vault server'''
    pass
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
11
4
0
2
2
1
2
2
2
1
0
4
0
0
7,987
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.AomiCommand
class AomiCommand(AomiError):
    """Invalid interaction attempted with the aomi cli"""
    catmsg = 'Problem with command line arguments'
class AomiCommand(AomiError):
    '''Invalid interaction attempted with the aomi cli'''
    pass
1
1
0
0
0
0
0
0.5
1
0
0
0
0
0
0
11
3
0
2
2
1
1
2
2
1
0
4
0
0
7,988
Autodesk/aomi
Autodesk_aomi/aomi/exceptions.py
aomi.exceptions.Validation
class Validation(AomiError):
    """Some kind of validation failed. Invalid string, length, who
    knows. Never trust user input tho."""
    catmsg = 'Validation Error'
    source = None

    def __init__(self, message=None, source=None):
        super(Validation, self).__init__(message=message)
        self.source = source
class Validation(AomiError):
    '''Some kind of validation failed. Invalid string, length, who
    knows. Never trust user input tho.'''
    def __init__(self, message=None, source=None):
        pass
2
1
3
0
3
0
1
0.33
1
1
0
0
1
0
1
12
9
1
6
4
4
2
6
4
4
1
4
0
1
7,989
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.UserPassUser
class UserPassUser(Auth):
    """UserPass User Account"""
    required_fields = ['username', 'password_file', 'policies']
    config_key = 'users'

    def export(self, _directory):
        pass

    def __init__(self, obj, opt):
        super(UserPassUser, self).__init__('userpass', obj, opt)
        self.username = obj['username']
        self.mount = 'userpass'
        self.path = sanitize_mount("auth/userpass/users/%s" % self.username)
        self.secret = obj['password_file']
        self._obj = {
            'policies': obj['policies']
        }
        map_val(self._obj, obj, 'ttl')
        map_val(self._obj, obj, 'max_ttl')
        self.filename = self.secret

    def secrets(self):
        return [self.secret]

    def diff(self, obj=None):
        return Resource.diff_write_only(self)

    def obj(self):
        filename = hard_path(self.filename, self.opt.secrets)
        secret_file(filename)
        password = open(filename).readline().strip()
        a_obj = self._obj
        a_obj['password'] = password
        a_obj['policies'] = ','.join(sorted(a_obj['policies']))
        return a_obj
class UserPassUser(Auth):
    '''UserPass User Account'''
    def export(self, _directory):
        pass
    def __init__(self, obj, opt):
        pass
    def secrets(self):
        pass
    def diff(self, obj=None):
        pass
    def obj(self):
        pass
6
1
5
0
5
0
1
0.03
1
1
0
0
5
6
5
27
35
5
29
17
23
1
27
17
21
1
3
0
5
7,990
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.LDAPGroup
class LDAPGroup(Resource):
    """LDAP Group Policy Mapping"""
    required_fields = ['policies', 'group']
    config_key = 'ldap_groups'

    def __init__(self, obj, opt):
        super(LDAPGroup, self).__init__(obj, opt)
        self.group = obj['group']
        self.path = sanitize_mount("auth/%s/groups/%s" %
                                   (obj.get('mount', 'ldap'), self.group))
        if self.present:
            self._obj = {
                "policies": obj['policies']
            }

    def fetch(self, vault_client):
        super(LDAPGroup, self).fetch(vault_client)
        if self.existing:
            s_policies = sorted(self.existing['policies'].split(','))
            self.existing['policies'] = s_policies

    def obj(self):
        return {
            'policies': sorted(self._obj.get('policies', []))
        }

    def write(self, client):
        w_obj = self._obj
        w_obj['policies'] = ','.join(w_obj['policies'])
        client.write(self.path, **w_obj)
class LDAPGroup(Resource):
    '''LDAP Group Policy Mapping'''
    def __init__(self, obj, opt):
        pass
    def fetch(self, vault_client):
        pass
    def obj(self):
        pass
    def write(self, client):
        pass
5
1
6
0
6
0
2
0.04
1
1
0
0
4
3
4
25
30
4
25
12
20
1
20
12
15
2
2
1
6
7,991
Autodesk/aomi
Autodesk_aomi/aomi/model/aws.py
aomi.model.aws.AWSRole
class AWSRole(Resource):
    """AWS Role"""
    required_fields = ['name', ['policy', 'arn']]
    child = True

    def __init__(self, mount, obj, opt):
        super(AWSRole, self).__init__(obj, opt)
        self.path = "%s/roles/%s" % (mount, obj['name'])
        if 'policy' in obj:
            self.filename = obj['policy']

        if self.present:
            self._obj = obj
            if 'policy' in self._obj:
                self._obj['policy'] = hard_path(self.filename, opt.policies)

    def export(self, directory):
        if not hasattr(self, 'filename'):
            return

        secret_h = self.export_handle(directory)
        secret_h.write(self.obj()['policy'])
        secret_h.close()

    def obj(self):
        s_obj = {}
        if 'policy' in self._obj:
            role_template_obj = self._obj.get('vars', {})
            base_obj = load_vars(self.opt)
            template_obj = merge_dicts(role_template_obj, base_obj)
            aws_role = render(self._obj['policy'], template_obj)
            aws_role = aws_role.replace(" ", "").replace("\n", "")
            s_obj = {'policy': aws_role}
        elif 'arn' in self._obj:
            s_obj = {'arn': self._obj['arn']}

        return s_obj
class AWSRole(Resource):
    '''AWS Role'''
    def __init__(self, mount, obj, opt):
        pass
    def export(self, directory):
        pass
    def obj(self):
        pass
4
1
10
1
9
0
3
0.03
1
1
0
0
3
3
3
24
37
6
30
15
26
1
29
15
25
4
2
2
9
7,992
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.Policy
class Policy(Resource):
    """Vault Policy"""
    required_fields = ['file', 'name']
    config_key = 'policies'

    def __init__(self, obj, opt):
        super(Policy, self).__init__(obj, opt)
        self.path = obj['name']
        if self.present:
            self.filename = obj['file']
            base_obj = load_vars(opt)
            self._obj = merge_dicts(base_obj, obj.get('vars', {}))

    def validate(self, obj):
        super(Policy, self).validate(obj)
        if 'vars' in obj and not isinstance(obj['vars'], dict):
            raise aomi.exceptions.Validation('policy vars must be dicts')

    def obj(self):
        return render(hard_path(self.filename, self.opt.policies), self._obj) \
            .lstrip() \
            .strip() \
            .replace("\n\n", "\n")

    @wrap_vault("reading")
    def read(self, client):
        LOG.debug("Reading %s", self)
        a_policy = client.get_policy(self.path)
        if a_policy:
            return a_policy.lstrip() \
                .strip() \
                .replace("\n\n", "\n")

        return None

    @wrap_vault("writing")
    def write(self, client):
        client.set_policy(self.path, self.obj())

    @wrap_vault("deleting")
    def delete(self, client):
        LOG.debug("Deleting %s", self)
        client.delete_policy(self.path)
class Policy(Resource):
    '''Vault Policy'''
    def __init__(self, obj, opt):
        pass
    def validate(self, obj):
        pass
    def obj(self):
        pass
    @wrap_vault("reading")
    def read(self, client):
        pass
    @wrap_vault("writing")
    def write(self, client):
        pass
    @wrap_vault("deleting")
    def delete(self, client):
        pass
10
1
5
0
5
0
2
0.03
1
3
1
0
6
3
6
27
43
7
35
17
25
1
27
14
20
2
2
1
9
7,993
Autodesk/aomi
Autodesk_aomi/aomi/model/auth.py
aomi.model.auth.TokenRole
class TokenRole(Auth):
    """TokenRole"""
    required_fields = ['name']
    config_key = 'tokenroles'

    def resources(self):
        return [self] + self.secret_ids

    def __init__(self, obj, opt):
        super(TokenRole, self).__init__('tokenrole', obj, opt)
        self.role_name = obj['name']
        self.path = "auth/token/roles/%s" % obj['name']
        self.mount = 'token'
        self.backend = 'token'
        self.secret_ids = []
        role_obj = {}
        for policy_type in ['allowed_policies', 'disallowed_policies']:
            if policy_type in obj:
                policies = obj[policy_type]
                role_obj[policy_type] = ','.join(sorted(policies))

        map_val(role_obj, obj, 'orphan', True)
        map_val(role_obj, obj, 'period', 0)
        map_val(role_obj, obj, 'renewable', True)
        map_val(role_obj, obj, 'explicit_max_ttl', 0)
        map_val(role_obj, obj, 'path_suffix', '')
        self._obj = role_obj

    def diff(self, obj=None):
        obj = dict(self.obj())
        for policy_type in ['allowed_policies', 'disallowed_policies']:
            if policy_type in obj:
                obj[policy_type] = obj[policy_type].split(',')
                obj[policy_type] = sorted(obj[policy_type])

        return super(TokenRole, self).diff(obj)

    @wrap_vault("writing")
    def write(self, client):
        client.write(self.path, **self.obj())

    @wrap_vault("reading")
    def read(self, client):
        try:
            return client.read(self.path)
        except hvac.exceptions.InvalidPath:
            return None

    @wrap_vault("deleting")
    def delete(self, client):
        client.delete(self.path)
class TokenRole(Auth):
    '''TokenRole'''
    def resources(self):
        pass
    def __init__(self, obj, opt):
        pass
    def diff(self, obj=None):
        pass
    @wrap_vault("writing")
    def write(self, client):
        pass
    @wrap_vault("reading")
    def read(self, client):
        pass
    @wrap_vault("deleting")
    def delete(self, client):
        pass
10
1
7
1
6
0
2
0.02
1
2
0
0
6
6
6
28
54
11
42
22
32
1
39
19
32
3
3
2
11
7,994
Autodesk/aomi
Autodesk_aomi/aomi/model/resource.py
aomi.model.resource.Resource
class Resource(object):
    """Vault Resource
    All aomi derived Vault resources should extend this
    class. It provides functionality for validation and
    API CRUD operations."""
    required_fields = []
    config_key = None
    resource_key = None
    child = False
    no_resource = False
    secret_format = 'data'

    def thaw(self, tmp_dir):
        """Will perform some validation and copy a
        decrypted secret to it's final location"""
        for sfile in self.secrets():
            src_file = "%s/%s" % (tmp_dir, sfile)
            err_msg = "%s secret missing from icefile" % (self)
            if not os.path.exists(src_file):
                if hasattr(self.opt, 'ignore_missing') and \
                   self.opt.ignore_missing:
                    LOG.warning(err_msg)
                    continue
                else:
                    raise aomi_excep.IceFile(err_msg)

            dest_file = "%s/%s" % (self.opt.secrets, sfile)
            dest_dir = os.path.dirname(dest_file)
            if not os.path.exists(dest_dir):
                os.mkdir(dest_dir)

            shutil.copy(src_file, dest_file)
            LOG.debug("Thawed %s %s", self, sfile)

    def tunable(self, obj):
        """A tunable resource maps against a backend..."""
        self.tune = dict()
        if 'tune' in obj:
            for tunable in MOUNT_TUNABLES:
                tunable_key = tunable[0]
                map_val(self.tune, obj['tune'], tunable_key)
                if tunable_key in self.tune and \
                   is_vault_time(self.tune[tunable_key]):
                    vault_time_s = vault_time_to_s(self.tune[tunable_key])
                    self.tune[tunable_key] = vault_time_s

        if 'description' in obj:
            self.tune['description'] = obj['description']

    def export_handle(self, directory):
        """Get a filehandle for exporting"""
        filename = getattr(self, 'filename')
        dest_file = "%s/%s" % (directory, filename)
        dest_dir = os.path.dirname(dest_file)
        if not os.path.isdir(dest_dir):
            os.mkdir(dest_dir, 0o700)

        return open(dest_file, 'w')

    def export(self, directory):
        """Export exportable resources decoding as needed"""
        if not self.existing or not hasattr(self, 'filename'):
            return

        secret_h = self.export_handle(directory)
        obj = self.existing
        if isinstance(obj, str):
            secret_h.write(obj)
        elif isinstance(obj, dict):
            secret_h.write(yaml.safe_dump(obj))

    def freeze(self, tmp_dir):
        """Copies a secret into a particular location"""
        for sfile in self.secrets():
            src_file = hard_path(sfile, self.opt.secrets)
            if not os.path.exists(src_file):
                raise aomi_excep.IceFile("%s secret not found at %s" %
                                         (self, src_file))

            dest_file = "%s/%s" % (tmp_dir, sfile)
            dest_dir = os.path.dirname(dest_file)
            if not os.path.isdir(dest_dir):
                os.mkdir(dest_dir, 0o700)

            shutil.copy(src_file, dest_file)
            LOG.debug("Froze %s %s", self, sfile)

    def resources(self):
        """List of included resources"""
        return [self]

    def grok_state(self, obj):
        """Determine the desired state of this resource based
        on data present"""
        if 'state' in obj:
            my_state = obj['state'].lower()
            if my_state != 'absent' and my_state != 'present':
                raise aomi_excep \
                    .Validation('state must be either "absent" or "present"')

        self.present = obj.get('state', 'present').lower() == 'present'

    def validate(self, obj):
        """Base validation method. Will inspect class attributes
        to dermine just what should be present"""
        if 'tags' in obj and not isinstance(obj['tags'], list):
            raise aomi_excep.Validation('tags must be a list')

        if self.present:
            check_obj(self.required_fields, self.name(), obj)

    def name(self):
        """A Friendly Name for our Resource"""
        return self.__doc__.split('\n')[0]

    def __str__(self):
        return "%s %s" % (self.name(), self.path)

    def obj(self):
        """Returns the Python dict/JSON object representation of this
        Secret as it is to be written to Vault"""
        return self._obj

    # note that this is going to be implemented by subclasses
    def secrets(self):  # pylint: disable=no-self-use
        """Returns a list of secrets which may be used used locally
        by this Vault resource"""
        return []

    def __init__(self, obj, opt):
        self.grok_state(obj)
        self.validate(obj)
        self.path = None
        self.existing = None
        self._obj = {}
        self.tags = obj.get('tags', [])
        self.opt = opt
        self.tune = None

    def diff(self, obj=None):
        """Determine if something has changed or not"""
        if self.no_resource:
            return NOOP

        if not self.present:
            if self.existing:
                return DEL

            return NOOP

        if not obj:
            obj = self.obj()

        is_diff = NOOP
        if self.present and self.existing:
            if isinstance(self.existing, dict):
                current = dict(self.existing)
                if 'refresh_interval' in current:
                    del current['refresh_interval']

                if diff_dict(current, obj):
                    is_diff = CHANGED
            elif is_unicode(self.existing):
                if self.existing != obj:
                    is_diff = CHANGED
        elif self.present and not self.existing:
            is_diff = ADD

        return is_diff

    def fetch(self, vault_client):
        """Populate internal representation of remote
        Vault resource contents"""
        result = self.read(vault_client)
        if result:
            if isinstance(result, dict) and 'data' in result:
                self.existing = result['data']
            else:
                self.existing = result
        else:
            self.existing = None

    def sync(self, vault_client):
        """Update remove Vault resource contents if needed"""
        if self.present and not self.existing:
            LOG.info("Writing new %s to %s",
                     self.secret_format, self)
            self.write(vault_client)
        elif self.present and self.existing:
            if self.diff() == CHANGED or self.diff() == OVERWRITE:
                LOG.info("Updating %s in %s",
                         self.secret_format, self)
                self.write(vault_client)
        elif not self.present and not self.existing:
            LOG.info("No %s to remove from %s",
                     self.secret_format, self)
        elif not self.present and self.existing:
            LOG.info("Removing %s from %s",
                     self.secret_format, self)
            self.delete(vault_client)

    def filtered(self):
        """Determines whether or not resource is filtered.
        Resources may be filtered if the tags do not match
        or the user has specified explict paths to include
        or exclude via command line options"""
        if not is_tagged(self.tags, self.opt.tags):
            LOG.info("Skipping %s as it does not have requested tags",
                     self.path)
            return False

        if not specific_path_check(self.path, self.opt):
            LOG.info("Skipping %s as it does not match specified paths",
                     self.path)
            return False

        return True

    @staticmethod
    def diff_write_only(resource):
        """A different implementation of diff that is used for
        those Vault resources that are write-only such as
        AWS root configs"""
        if resource.present and not resource.existing:
            return ADD
        elif not resource.present and resource.existing:
            return DEL
        elif resource.present and resource.existing:
            return OVERWRITE

        return NOOP

    @wrap_vault("reading")
    def read(self, client):
        """Read from Vault while handling non surprising errors."""
        val = None
        if self.no_resource:
            return val

        LOG.debug("Reading from %s", self)
        try:
            val = client.read(self.path)
        except hvac.exceptions.InvalidRequest as vault_exception:
            if str(vault_exception).startswith('no handler for route'):
                val = None

        return val

    @wrap_vault("writing")
    def write(self, client):
        """Write to Vault while handling non-surprising errors."""
        val = None
        if not self.no_resource:
            val = client.write(self.path, **self.obj())

        return val

    @wrap_vault("deleting")
    def delete(self, client):
        """Delete from Vault while handling non-surprising errors."""
        val = None
        if self.no_resource:
            return val

        LOG.debug("Deleting %s", self)
        try:
            client.delete(self.path)
        except (hvac.exceptions.InvalidPath,
                hvac.exceptions.InvalidRequest) \
                as vault_exception:
            if str(vault_exception).startswith('no handler for route'):
                val = None

        return val
class Resource(object):
    '''Vault Resource
    All aomi derived Vault resources should extend this
    class. It provides functionality for validation and
    API CRUD operations.'''
    def thaw(self, tmp_dir):
        '''Will perform some validation and copy a
        decrypted secret to it's final location'''
        pass
    def tunable(self, obj):
        '''A tunable resource maps against a backend...'''
        pass
    def export_handle(self, directory):
        '''Get a filehandle for exporting'''
        pass
    def export(self, directory):
        '''Export exportable resources decoding as needed'''
        pass
    def freeze(self, tmp_dir):
        '''Copies a secret into a particular location'''
        pass
    def resources(self):
        '''List of included resources'''
        pass
    def grok_state(self, obj):
        '''Determine the desired state of this resource based
        on data present'''
        pass
    def validate(self, obj):
        '''Base validation method. Will inspect class attributes
        to dermine just what should be present'''
        pass
    def name(self):
        '''A Friendly Name for our Resource'''
        pass
    def __str__(self):
        pass
    def obj(self):
        '''Returns the Python dict/JSON object representation of this
        Secret as it is to be written to Vault'''
        pass
    def secrets(self):
        '''Returns a list of secrets which may be used used locally
        by this Vault resource'''
        pass
    def __init__(self, obj, opt):
        pass
    def diff(self, obj=None):
        '''Determine if something has changed or not'''
        pass
    def fetch(self, vault_client):
        '''Populate internal representation of remote
        Vault resource contents'''
        pass
    def sync(self, vault_client):
        '''Update remove Vault resource contents if needed'''
        pass
    def filtered(self):
        '''Determines whether or not resource is filtered.
        Resources may be filtered if the tags do not match
        or the user has specified explict paths to include
        or exclude via command line options'''
        pass
    @staticmethod
    def diff_write_only(resource):
        '''A different implementation of diff that is used for
        those Vault resources that are write-only such as
        AWS root configs'''
        pass
    @wrap_vault("reading")
    def read(self, client):
        '''Read from Vault while handling non surprising errors.'''
        pass
    @wrap_vault("writing")
    def write(self, client):
        '''Write to Vault while handling non-surprising errors.'''
        pass
    @wrap_vault("deleting")
    def delete(self, client):
        '''Delete from Vault while handling non-surprising errors.'''
        pass
26
20
11
1
9
1
3
0.18
1
5
2
13
20
7
21
21
275
45
195
65
169
36
168
59
146
12
1
3
70
7,995
Autodesk/aomi
Autodesk_aomi/tests/test_seed.py
test_seed.GeneratedSecretTest
class GeneratedSecretTest(unittest.TestCase):
    @unittest.skip("until it uses the model")
    def test_secretfile_overwrite(self):
        aomi_opt = aomi.cli.parser_factory(['seed'])[1]
        og_obj = {
            'mount': 'foo',
            'path': 'bar',
            'keys': [
                {
                    'name': 'user',
                    'method': 'words',
                    'overwrite': False
                },
                {
                    'name': 'pass',
                    'method': 'words',
                    'overwrite': True
                }
            ]
        }
        secret = aomi.model.generic.generate_obj(
            'foo/bar', og_obj, {}, aomi_opt)
        secret2 = aomi.model.generic.generate_obj(
            'foo/bar', og_obj, secret, aomi_opt)
        assert secret['user'] == secret2['user']
        assert secret['pass'] != secret2['pass']
class GeneratedSecretTest(unittest.TestCase):
    @unittest.skip("until it uses the model")
    def test_secretfile_overwrite(self):
        pass
3
0
22
0
22
0
1
0
1
0
0
0
1
0
1
73
24
0
24
7
21
0
8
6
6
1
2
0
1
7,996
Autodesk/aomi
Autodesk_aomi/aomi/model/resource.py
aomi.model.resource.Mount
class Mount(Resource):
    """Vault Generic Backend"""
    required_fields = ['path']
    config_key = 'mounts'
    backend = 'generic'
    secret_format = 'mount point'
    no_resource = True

    def __init__(self, obj, opt):
        super(Mount, self).__init__(obj, opt)
        self.mount = obj['path']
        self.path = self.mount
        self.tunable(obj)
class Mount(Resource):
    '''Vault Generic Backend'''
    def __init__(self, obj, opt):
        pass
2
1
5
0
5
0
1
0.09
1
1
0
0
1
2
1
22
13
1
11
9
9
1
11
9
9
1
2
0
1
7,997
Autodesk/aomi
Autodesk_aomi/aomi/model/aws.py
aomi.model.aws.AWS
class AWS(Secret):
    """AWS Backend"""
    resource_key = 'aws_file'
    required_fields = [['aws_file', 'aws'], 'mount', 'region', 'roles']
    backend = 'aws'

    def resources(self):
        pieces = [self]
        if self.present:
            pieces = pieces + [self.ttl] + self.roles

        return pieces

    def diff(self, obj=None):
        return Resource.diff_write_only(self)

    def fetch(self, vault_client):
        if is_mounted(self.backend,
                      self.mount,
                      vault_client.list_secret_backends()):
            self.existing = True

    def sync(self, vault_client):
        if self.present:
            LOG.info("Writing AWS root to %s", self.path)
            self.write(vault_client)
        else:
            LOG.info("Removing AWS root at %s", self.path)
            self.delete(vault_client)

    def obj(self):
        _secret, filename, region = self._obj
        actual_filename = hard_path(filename, self.opt.secrets)
        secret_file(actual_filename)
        template_obj = load_vars(self.opt)
        aws_obj = load_var_file(actual_filename, template_obj)
        check_obj(['access_key_id', 'secret_access_key'], self, aws_obj)
        return {
            'access_key': aws_obj['access_key_id'],
            'secret_key': aws_obj['secret_access_key'],
            'region': region
        }

    def secrets(self):
        return [self._obj[0]]

    def __init__(self, obj, opt):
        super(AWS, self).__init__(obj, opt)
        self.mount = sanitize_mount(obj['mount'])
        self.path = "%s/config/root" % self.mount
        aws_file_path = obj['aws_file']
        if self.present:
            self._obj = (obj['aws_file'], aws_file_path, obj['region'])
            self.roles = []
            for role in obj['roles']:
                self.roles.append(AWSRole(self.mount, role, opt))

            if self.roles is None:
                raise aomi.exceptions.AomiData('missing aws roles')

            ttl_obj, _lease_msg = grok_ttl(obj)
            if ttl_obj:
                self.ttl = AWSTTL(self.mount, ttl_obj, opt)

        self.tunable(obj)
class AWS(Secret):
    '''AWS Backend'''
    def resources(self):
        pass
    def diff(self, obj=None):
        pass
    def fetch(self, vault_client):
        pass
    def sync(self, vault_client):
        pass
    def obj(self):
        pass
    def secrets(self):
        pass
    def __init__(self, obj, opt):
        pass
8
1
8
1
7
0
2
0.02
1
4
3
0
7
6
7
28
70
12
57
25
49
1
46
25
38
5
3
2
14
7,998
Autodesk/aomi
Autodesk_aomi/aomi/model/resource.py
aomi.model.resource.Auth
class Auth(Resource):
    """Auth Backend"""
    def __init__(self, backend, obj, opt):
        super(Auth, self).__init__(obj, opt)
        self.backend = backend
class Auth(Resource):
    '''Auth Backend'''
    def __init__(self, backend, obj, opt):
        pass
2
1
3
0
3
0
1
0.25
1
1
0
6
1
1
1
22
5
0
4
3
2
1
4
3
2
1
2
0
1
7,999
Autodesk/aomi
Autodesk_aomi/aomi/model/resource.py
aomi.model.resource.AuditLog
class AuditLog(Resource):
    """Audit Logs
    Only supports syslog and file backends"""
    required_fields = ['type']
    config_key = 'audit_logs'
    no_resource = True

    def __init__(self, log_obj, opt):
        super(AuditLog, self).__init__(log_obj, opt)
        self.backend = log_obj['type']
        self.mount = self.backend
        self.path = log_obj.get('path', self.backend)
        obj = {
            'name': log_obj.get('name', self.backend),
        }
        obj_opt = dict()
        if self.backend == 'file':
            obj_opt['file_path'] = log_obj['file_path']

        if self.backend == 'syslog':
            if 'tag' in log_obj:
                obj_opt['tag'] = log_obj['tag']

            if 'facility' in log_obj:
                obj_opt['facility'] = log_obj['facility']

        if 'description' in log_obj:
            obj_opt['description'] = log_obj['description']

        obj['options'] = obj_opt
        self._obj = obj
        self.tunable(obj)
class AuditLog(Resource):
    '''Audit Logs
    Only supports syslog and file backends'''
    def __init__(self, log_obj, opt):
        pass
2
1
25
4
21
0
6
0.08
1
2
0
0
1
4
1
22
32
5
25
11
23
2
23
11
21
6
2
2
6