diff --git a/executor/crud/playbooks_update_processor.py b/executor/crud/playbooks_update_processor.py index 2a7caed3d..07df5f28a 100644 --- a/executor/crud/playbooks_update_processor.py +++ b/executor/crud/playbooks_update_processor.py @@ -71,7 +71,9 @@ def update_playbook(elem: PlayBook, update_op: UpdatePlaybookOp.UpdatePlaybook) task.is_active = False task.save(update_fields=['is_active']) elem.is_active = False - elem.save(update_fields=['is_active']) + random_generated_str = str(uuid.uuid4()) + elem.name = f"{elem.name}###(inactive)###{random_generated_str}" + elem.save(update_fields=['is_active', 'name']) updated_playbook = update_op.playbook updated_elem, err = create_db_playbook(elem.account, elem.created_by, updated_playbook) if err: diff --git a/executor/workflows/action/notify_action_executor/slack_notifier.py b/executor/workflows/action/notify_action_executor/slack_notifier.py index 25d7a8b56..7c0d614fd 100644 --- a/executor/workflows/action/notify_action_executor/slack_notifier.py +++ b/executor/workflows/action/notify_action_executor/slack_notifier.py @@ -39,13 +39,22 @@ def notify(self, config: WorkflowActionNotificationConfigProto, execution_output blocks = [] for i, interpretation in enumerate(execution_output): if interpretation.type == InterpretationProto.Type.SUMMARY: - blocks.append({ - "type": "section", - "text": { - "type": "mrkdwn", - "text": f'Step {i + 1}: {interpretation.title.value}' - } - }) + if interpretation.title.value.startswith('Hello team'): + blocks.append({ + "type": "section", + "text": { + "type": "mrkdwn", + "text": interpretation.title.value + } + }) + else: + blocks.append({ + "type": "section", + "text": { + "type": "mrkdwn", + "text": f'Step {i + 1}: {interpretation.title.value}' + } + }) elif interpretation.type == InterpretationProto.Type.IMAGE: blocks.append({ "type": "section", diff --git a/executor/workflows/crud/workflows_update_processor.py b/executor/workflows/crud/workflows_update_processor.py index 
ce81c5d3f..95054dda7 100644 --- a/executor/workflows/crud/workflows_update_processor.py +++ b/executor/workflows/crud/workflows_update_processor.py @@ -74,8 +74,10 @@ def update_workflow(elem: Workflow, update_op: UpdateWorkflowOp.UpdateWorkflow) for action_mapping in all_workflow_actions_mapping: action_mapping.is_active = False action_mapping.save(update_fields=['is_active']) elem.is_active = False - elem.save(update_fields=['is_active']) + random_generated_str = str(uuid.uuid4()) + elem.name = f"{elem.name}###(inactive)###{random_generated_str}" + elem.save(update_fields=['is_active', 'name']) updated_workflow = update_op.workflow updated_elem, err = create_db_workflow(elem.account, elem.created_by, updated_workflow) if err: diff --git a/executor/workflows/tasks.py b/executor/workflows/tasks.py index 6502c248e..c09a623b9 100644 --- a/executor/workflows/tasks.py +++ b/executor/workflows/tasks.py @@ -157,7 +157,9 @@ def workflow_action_execution(account_id, workflow_id, workflow_execution_id, pl playbook_execution = playbook_executions.first() pe_proto: PlaybookExecutionProto = playbook_execution.proto pe_logs = pe_proto.logs - execution_output: [InterpretationProto] = playbook_execution_result_interpret(InterpreterType.BASIC_I, pe_logs) + p_proto = pe_proto.playbook + execution_output: [InterpretationProto] = playbook_execution_result_interpret(InterpreterType.BASIC_I, p_proto, + pe_logs) workflow = workflows.first() w_proto: WorkflowProto = workflow.proto w_actions = w_proto.actions diff --git a/intelligence_layer/task_result_interpreters/task_result_interpreter_facade.py b/intelligence_layer/task_result_interpreters/task_result_interpreter_facade.py index 78a14fc7d..d04850a37 100644 --- a/intelligence_layer/task_result_interpreters/task_result_interpreter_facade.py +++ b/intelligence_layer/task_result_interpreters/task_result_interpreter_facade.py @@ -1,11 +1,13 @@ import logging +from google.protobuf.wrappers_pb2 import StringValue + from
intelligence_layer.task_result_interpreters.metric_task_result_interpreters.basic_metric_task_interpreter import \ basic_metric_task_result_interpreter from protos.playbooks.intelligence_layer.interpreter_pb2 import InterpreterType, Interpretation as InterpretationProto from protos.playbooks.playbook_pb2 import PlaybookMetricTaskExecutionResult as PlaybookMetricTaskExecutionResultProto, \ PlaybookTaskDefinition as PlaybookTaskDefinitionProto, \ - PlaybookTaskExecutionResult as PlaybookTaskExecutionResultProto, PlaybookExecutionLog + PlaybookTaskExecutionResult as PlaybookTaskExecutionResultProto, PlaybookExecutionLog, Playbook as PlaybookProto logger = logging.getLogger(__name__) @@ -19,9 +21,12 @@ def task_result_interpret(interpreter_type: InterpreterType, task: PlaybookTaskD return basic_metric_task_result_interpreter(task, metric_task_result) -def playbook_execution_result_interpret(interpreter_type: InterpreterType, +def playbook_execution_result_interpret(interpreter_type: InterpreterType, playbook: PlaybookProto, playbook_execution_logs: [PlaybookExecutionLog]) -> [InterpretationProto]: - interpretations: [InterpretationProto] = [] + base_title = f'Hello team, here is snapshot of `{playbook.name.value}` that is configured for this alert' + interpretations: [InterpretationProto] = [ + InterpretationProto(type=InterpretationProto.Type.SUMMARY, title=StringValue(value=base_title)) + ] for log in playbook_execution_logs: try: interpretation_result = task_result_interpret(interpreter_type, log.task, log.task_execution_result)