diff --git a/images/ansible/common/10_image_cleanup.yml b/images/ansible/common/10_image_cleanup.yml index 838320dc..080149bd 100644 --- a/images/ansible/common/10_image_cleanup.yml +++ b/images/ansible/common/10_image_cleanup.yml @@ -77,3 +77,15 @@ ansible.builtin.file: path: "~{{ ansible_user }}/.bash_history" state: absent + +- name: Logout of container registries + become: true + become_user: "{{ item }}" + containers.podman.podman_logout: + all: true + no_log: true + loop: + - "{{ student_username }}" + - awx + - root + - "{{ ansible_user }}" diff --git a/images/ansible/common/50_install_controller.yml b/images/ansible/common/50_install_controller.yml index 8420f440..52947ee1 100644 --- a/images/ansible/common/50_install_controller.yml +++ b/images/ansible/common/50_install_controller.yml @@ -103,12 +103,10 @@ until: __controller_setup_async_result.finished register: __controller_setup_async_result when: __controller_setup_async.ansible_job_id is defined - rescue: - - name: Print error message - ansible.builtin.fail: - msg: | - "Rescue - Controller installation failed. - "{{ __controller_setup_async_result }}" + always: + - name: Print setup output + ansible.builtin.debug: + var: __controller_setup_async_result - name: Wait for Automation Controller to be up ansible.builtin.uri: diff --git a/images/ansible/common/5_configure_users.yml b/images/ansible/common/5_configure_users.yml index a33fca04..f201876d 100644 --- a/images/ansible/common/5_configure_users.yml +++ b/images/ansible/common/5_configure_users.yml @@ -16,7 +16,7 @@ line: >- PasswordAuthentication yes -- name: Configure sudoers for wheel group +- name: Configure sudoers for wheel group # noqa args[module] community.general.sudoers: name: wheel_sudoers state: present diff --git a/images/ansible/common/60_check_vars.yml b/images/ansible/common/60_check_vars.yml index 29db017e..df2d6bab 100644 --- a/images/ansible/common/60_check_vars.yml +++ b/images/ansible/common/60_check_vars.yml @@ -3,12 +3,16 @@ # Converts environment variables to YAML equivalent and creates facts. 
## - name: Make sure track_slug variable is set + run_once: true + delegate_to: localhost ansible.builtin.assert: that: lookup('ansible.builtin.env', 'TRACK_SLUG') or track_slug is defined quiet: true fail_msg: "Please set the Ansible 'track_slug' variable or 'TRACK_SLUG' environment variable" - name: Set track_slug fact from TRACK_SLUG environment variable + run_once: true + delegate_to: localhost ansible.builtin.set_fact: track_slug: "{{ lookup('ansible.builtin.env', 'TRACK_SLUG') }}" when: @@ -17,6 +21,7 @@ - name: Include vars file from vars directory when: (track_slug is defined) and (track_slug | length > 0) + run_once: true delegate_to: localhost become: false no_log: true @@ -42,17 +47,23 @@ when: vars_file.stat.exists - name: Map required variables to facts + run_once: true + delegate_to: localhost + when: (extra_vars is defined) and (extra_vars | length > 0) + no_log: true ansible.builtin.set_fact: "{{ __extra_vars_item.name }}": "{{ lookup('ansible.builtin.env', __extra_vars_item.env | default(omit) ) or lookup('ansible.builtin.vars', __extra_vars_item.name | default(omit)) }}" # yamllint disable-line rule:line-length register: __extra_vars_results - when: (extra_vars is defined) and (extra_vars | length > 0) loop_control: loop_var: __extra_vars_item label: "{{ __extra_vars_item.name }}" loop: "{{ extra_vars }}" - no_log: true - name: Check required variables are set + run_once: true + when: (__extra_vars_results.results is defined) and (__extra_vars_results.results | length > 0) + delegate_to: localhost + no_log: true ansible.builtin.assert: that: - lookup('ansible.builtin.vars', __extra_vars_results_item.__extra_vars_item.name) | length > 0 @@ -61,9 +72,7 @@ - Environment variable - {{ __extra_vars_results_item.__extra_vars_item.env | default('Not applicable - ignore') }}. - Ansible variable - {{ __extra_vars_results_item.__extra_vars_item.name }}. 
quiet: true - when: (__extra_vars_results.results is defined) and (__extra_vars_results.results | length > 0) loop: "{{ __extra_vars_results.results }}" loop_control: loop_var: __extra_vars_results_item label: "{{ __extra_vars_results_item.__extra_vars_item.name }}" - no_log: true diff --git a/images/ansible/common/70_controller_post_install.yml b/images/ansible/common/70_controller_post_install.yml index c6d43764..27f583b7 100644 --- a/images/ansible/common/70_controller_post_install.yml +++ b/images/ansible/common/70_controller_post_install.yml @@ -11,6 +11,7 @@ controller_password: "{{ controller_password }}" controller_host: "{{ controller_hostname }}" validate_certs: "{{ controller_validate_certs }}" + no_log: true - name: Install EPEL repository ansible.builtin.yum_repository: @@ -71,7 +72,7 @@ # Added for student command line tasks - name: Copy ansible-navigator.yml template - {{ student_username }} ansible.builtin.template: - src: "ansible-navigator.yml.j2" + src: "{{ track_slug }}/ansible-navigator.yml.j2" dest: "~{{ student_username }}/.ansible-navigator.yml" mode: "644" owner: "{{ student_username }}" @@ -192,7 +193,10 @@ loop: - "{{ student_username }}" - awx + - root + - "{{ ansible_user }}" + # TODO: Fix with inner / outer loop for user and controller_execution_environments var - name: Pull execution environment images - awx become_user: "awx" containers.podman.podman_image: @@ -200,8 +204,8 @@ loop: "{{ controller_execution_environments }}" register: __podman_pull_supported_awx until: __podman_pull_supported_awx is not failed - retries: 40 - delay: 2 + retries: 180 + delay: 1 - name: Pull execution environment images - {{ student_username }} become_user: "{{ student_username }}" @@ -210,15 +214,16 @@ loop: "{{ controller_execution_environments }}" register: __podman_pull_supported_student until: __podman_pull_supported_student is not failed - retries: 40 - delay: 2 + retries: 180 + delay: 1 # Added for track lifecycle scripts - root - name: Pull execution environment images - {{ ansible_user_id }} containers.podman.podman_image: name: "{{ item.image }}" + become: true loop: "{{ controller_execution_environments }}" register: __podman_pull_supported_student until: __podman_pull_supported_student is not failed - retries: 40 - delay: 2 + retries: 180 + delay: 1 diff --git a/images/ansible/lightspeed-101-setup.yml b/images/ansible/lightspeed-101-setup.yml index 184e2801..89396423 100644 --- a/images/ansible/lightspeed-101-setup.yml +++ b/images/ansible/lightspeed-101-setup.yml @@ -8,24 +8,12 @@ vars: gcp_login: &gcp_login - project: "{{ gcp_project }}" - auth_kind: serviceaccount - service_account_file: "{{ gcp_service_account_file }}" + project: "{{ gcp_resources_project }}" + zone: "{{ gcp_resources_zone }}" track_slug: lightspeed-101 pre_tasks: - name: Include variable tasks - vars: - extra_vars: - - name: track_slug - env: track_slug - - name: gcp_service_account_file - env: GCP_SERVICE_ACCOUNT_FILE - - name: gcp_service_account - env: GCP_SERVICE_ACCOUNT - - name: gcp_instances - - name: gcp_project - env: GCP_PROJECT ansible.builtin.include_tasks: file: "{{ playbook_dir }}/common/60_check_vars.yml" apply: @@ -38,7 +26,7 @@ # use '--tags never' if using ansible-playbook to install collections - name: Install collections - {{ inventory_hostname }} vars: - controller_collections: + installer_host_collections: - google.cloud - ansible.posix - community.crypto @@ -49,118 +37,61 @@ dest: /usr/share/ansible/collections type: collection name: "{{ item }}" - loop: "{{ 
controller_collections }}" + loop: "{{ installer_host_collections }}" register: __collection_install - when: (controller_collections is defined) and (controller_collections | length > 0) + when: (installer_host_collections is defined) and (installer_host_collections | length > 0) tags: - never + - host-installer-collections - - name: Remove existing gcloud instances - when: (gcp_instances is defined) and (gcp_instances | length > 0) - tags: - - remove-instances - block: - - name: Remove gcloud instances - google.cloud.gcp_compute_instance: - name: "{{ __remove_instances_job_async_item.name }}" - state: absent - zone: "{{ gcp_zone }}" - labels: "{{ __create_instances_job_async_item.labels | default(omit) }}" - tags: "{{ __create_instances_job_async_item.tags | default(omit) }}" - <<: *gcp_login - loop: "{{ gcp_instances }}" - loop_control: - loop_var: __remove_instances_job_async_item - label: "{{ __remove_instances_job_async_item.name }}" - async: 1000 - poll: 0 - register: __remove_instances_job_async - changed_when: not __remove_instances_job_async.changed + - name: Remove GCP instances + vars: + gcp_resources_instances_state: absent + ansible.builtin.include_role: + name: gcp_resources + tasks_from: manage_gcp_instances.yml - - name: "Remove existing gcloud instances | Wait for finish the instance deletion" - ansible.builtin.async_status: - jid: "{{ __remove_instances_job_async_result_item.ansible_job_id }}" - retries: 60 - delay: 1 - loop: "{{ __remove_instances_job_async.results }}" - loop_control: - loop_var: __remove_instances_job_async_result_item - label: "{{ __remove_instances_job_async_result_item.__remove_instances_job_async_item.name }}" - until: __remove_instances_job_async_result.finished - register: __remove_instances_job_async_result - when: __remove_instances_job_async_result_item.ansible_job_id is defined - - - name: Create gcloud instances - when: (gcp_instances is defined) and (gcp_instances | length > 0) + - name: Create GCP instances + ansible.builtin.include_role: + name: gcp_resources + tasks_from: manage_gcp_instances.yml + apply: + tags: + - create-instances tags: - create-instances - block: - - name: Create new gcloud instances - google.cloud.gcp_compute_instance: - name: "{{ __create_instances_job_async_item.name }}" - machine_type: "{{ __create_instances_job_async_item.machine_type }}" - labels: "{{ __create_instances_job_async_item.labels | default(omit) }}" - tags: "{{ __create_instances_job_async_item.tags | default(omit) }}" - state: present - zone: "{{ gcp_zone }}" - network_interfaces: "{{ __create_instances_job_async_item.network_interfaces }}" - disks: "{{ __create_instances_job_async_item.disks }}" - metadata: "{{ __create_instances_job_async_item.metadata }}" - <<: *gcp_login - loop: "{{ gcp_instances }}" - loop_control: - loop_var: __create_instances_job_async_item - label: "{{ __create_instances_job_async_item.name }}" - async: 1000 - poll: 0 - register: __create_instances_job_async - changed_when: not __create_instances_job_async.changed - - - name: Create new gcloud instances | Async - ansible.builtin.async_status: - jid: "{{ __create_instances_job_async_result_item.ansible_job_id }}" - retries: 60 - delay: 1 - loop: "{{ __create_instances_job_async.results }}" - loop_control: - loop_var: __create_instances_job_async_result_item - label: "{{ __create_instances_job_async_result_item.__create_instances_job_async_item.name }}" - until: __create_instances_job_async_result.finished - register: __create_instances_job_async_result - when: 
__create_instances_job_async_result_item.ansible_job_id is defined - rescue: - - name: Print error message - ansible.builtin.fail: - msg: | - "Rescue - Unable to create gcloud instances. - "{{ __create_instances_job_async_result }}" - notify: - - remove instances - - name: Get gcp node instance info - google.cloud.gcp_compute_instance_info: # noqa syntax-check + - name: Get GCP instance info + google.cloud.gcp_compute_instance_info: filters: - labels.role = "{{ track_slug }}" - zone: "{{ gcp_zone }}" <<: *gcp_login - register: __mesh_node_instance_info + register: __gcp_node_instance_info tags: - - create-instances + - always - name: Wait for gcloud instances to start when: - - (__mesh_node_instance_info.resources is defined) and (__mesh_node_instance_info.resources | length > 0) - - (gcp_instances is defined) and (gcp_instances | length > 0) + - (__gcp_node_instance_info.resources is defined) and (__gcp_node_instance_info.resources | length > 0) tags: - create-instances block: + - name: Remove previous known_hosts keys - {{ inventory_hostname }} + ansible.builtin.known_hosts: + name: "{{ __instances_item.networkInterfaces[0].accessConfigs[0].natIP }}" + state: absent + loop: "{{ __gcp_node_instance_info.resources }}" + loop_control: + loop_var: __instances_item + label: "{{ __instances_item.name }}" + - name: Wait for gcloud instances - SSH ansible.builtin.wait_for: host: "{{ __instances_ssh_async_item.networkInterfaces[0].accessConfigs[0].natIP }}" port: 22 delay: 1 timeout: 180 - loop: "{{ __mesh_node_instance_info.resources }}" + loop: "{{ __gcp_node_instance_info.resources }}" loop_control: loop_var: __instances_ssh_async_item label: "{{ __instances_ssh_async_item.name }}" @@ -169,8 +100,8 @@ register: __instances_ssh_async changed_when: not __instances_ssh_async.changed - - name: "Wait for gcloud instances - SSH | Wait for SSH" - ansible.builtin.async_status: + - name: Wait for gcloud instances - SSH | Async + ansible.builtin.async_status: # noqa args[module] jid: "{{ __instances_ssh_async_result_item.ansible_job_id }}" retries: 180 delay: 1 @@ -182,25 +113,39 @@ register: __instances_ssh_async_result when: __instances_ssh_async_result_item.ansible_job_id is defined rescue: - - name: Print error message + - name: Print error message and remove instances ansible.builtin.fail: msg: | "Rescue - Unable to connect to GCloud instances SSH. 
"{{ __instances_ssh_async_result }}" - notify: remove instances - name: Add gcp controller hosts to inventory ansible.builtin.add_host: name: "{{ item.name }}" groups: "automationcontroller" ansible_host: "{{ item.networkInterfaces[0].accessConfigs[0].natIP }}" - args: "{{ gcp_inventory_host_vars | default(omit) }}" - loop: "{{ __mesh_node_instance_info.resources }}" + args: "{{ gcp_inventory_host_vars }}" + loop: "{{ __gcp_node_instance_info.resources }}" loop_control: label: "{{ item.name }}" when: - item.labels["mesh-type"] == "controller" - - (__mesh_node_instance_info.resources is defined) and (__mesh_node_instance_info.resources | length > 0) + - (__gcp_node_instance_info.resources is defined) and (__gcp_node_instance_info.resources | length > 0) + tags: + - create-instances + + - name: Add gcp nodes to inventory + ansible.builtin.add_host: + name: "{{ item.name }}" + groups: "rhel-nodes" + ansible_host: "{{ item.networkInterfaces[0].accessConfigs[0].natIP }}" + args: "{{ gcp_inventory_host_vars }}" + loop: "{{ __gcp_node_instance_info.resources }}" + loop_control: + label: "{{ item.name }}" + when: + - item.labels["mesh-type"] == "rhel-node" + - (__gcp_node_instance_info.resources is defined) and (__gcp_node_instance_info.resources | length > 0) tags: - create-instances @@ -208,10 +153,10 @@ ansible.builtin.known_hosts: name: "{{ item.networkInterfaces[0].accessConfigs[0].natIP }}" state: absent - loop: "{{ __mesh_node_instance_info.resources }}" + loop: "{{ __gcp_node_instance_info.resources }}" loop_control: label: "{{ item.name }}" - when: (__mesh_node_instance_info.resources is defined) and (__mesh_node_instance_info.resources | length > 0) + when: (__gcp_node_instance_info.resources is defined) and (__gcp_node_instance_info.resources | length > 0) tags: - create-instances - ssh-key @@ -225,7 +170,7 @@ loop: "{{ query('inventory_hostnames', 'all') }}" - name: Create installation SSH key - when: (__mesh_node_instance_info.resources is defined) and (__mesh_node_instance_info.resources | length > 0) + when: (__gcp_node_instance_info.resources is defined) and (__gcp_node_instance_info.resources | length > 0) tags: - create-instances block: @@ -244,7 +189,6 @@ - name: Configure common tasks on all hosts hosts: all become: true - gather_facts: true tags: - common-tasks @@ -283,7 +227,6 @@ vars: extra_vars: - name: lab_containers - - name: track_slug ansible.builtin.include_tasks: file: "{{ playbook_dir }}/common/60_check_vars.yml" apply: @@ -311,7 +254,7 @@ filters: - labels.role = "{{ track_slug }}" - labels.mesh-type = controller - zone: "{{ gcp_zone }}" + zone: "{{ gcp_resources_zone }}" <<: *gcp_login delegate_to: localhost become: false @@ -526,13 +469,6 @@ group: "{{ student_username }}" mode: "755" - # 8 Aug 2023 - https://bugzilla.redhat.com/show_bug.cgi?id=2228948 - - name: Downgrade Firefox for Bug - 2228948 - ansible.builtin.dnf: - name: - - firefox-102.13.0-2.el9_2.x86_64 - allow_downgrade: true - - name: Customize Gnome kiosk configuration - {{ student_username }} environment: DISPLAY: :2 @@ -562,24 +498,6 @@ tags: - vscode-config - - name: Include common image cleanup tasks - ansible.builtin.include_tasks: - file: "{{ playbook_dir }}/common/10_image_cleanup.yml" - - - name: Build Cockpit install files cache - ansible.builtin.dnf: - name: - - cockpit - - cockpit-podman - state: present - download_only: true - - - name: Pull docker.io/dpage/pgadmin4 image - containers.podman.podman_image: - name: docker.io/dpage/pgadmin4 - tag: 7.5 - state: present - # 
https://github.com/redhat-cop/controller_configuration - name: Create lab controller objects ansible.builtin.include_role: @@ -590,7 +508,57 @@ tags: - controller-objects - # Run cleanup tasks on images + # Run cleanup tasks + - name: Include common image cleanup tasks + ansible.builtin.include_tasks: + file: "{{ playbook_dir }}/common/10_image_cleanup.yml" + + - name: Remove known_hosts entries for root + ansible.builtin.known_hosts: + name: "{{ item.name }}" + state: absent + loop: "{{ gcp_resources_instances }}" + + - name: Remove known_hosts entries for {{ admin_username }} + ansible.builtin.known_hosts: + name: "{{ item.name }}" + state: absent + become: false + loop: "{{ gcp_resources_instances }}" + +- name: Configure rhel nodes + hosts: rhel-nodes + become: true + tags: + - configure-rhel-nodes + + pre_tasks: + - name: Include variable tasks + ansible.builtin.include_tasks: + file: "{{ playbook_dir }}/common/60_check_vars.yml" + apply: + tags: + - always + tags: + - always + + tasks: + - name: Install OS packages + ansible.builtin.dnf: + name: + - podman + - git + - ansible-core + - firewalld + - python3-pip + state: present + + - name: Start and enable firewalld service + ansible.builtin.service: + name: firewalld + state: started + enabled: true + - name: Include common image cleanup tasks ansible.builtin.include_tasks: file: "{{ playbook_dir }}/common/10_image_cleanup.yml" @@ -599,14 +567,29 @@ ansible.builtin.known_hosts: name: "{{ item.name }}" state: absent - loop: "{{ gcp_instances }}" + loop: "{{ gcp_resources_instances }}" - name: Remove known_hosts entries for {{ admin_username }} ansible.builtin.known_hosts: name: "{{ item.name }}" state: absent become: false - loop: "{{ gcp_instances }}" + loop: "{{ gcp_resources_instances }}" + + - name: Build install files cache + ansible.builtin.dnf: + name: + - cockpit + - cockpit-podman + - postgresql-server + state: present + download_only: true + + - name: Pull docker.io/dpage/pgadmin4 image + containers.podman.podman_image: + name: docker.io/dpage/pgadmin4 + tag: latest + state: present # Create GCP images - name: Create instruqt images @@ -626,159 +609,30 @@ - always tasks: - - name: Stop gcloud instances - when: (gcp_instances is defined) and (gcp_instances | length > 0) - tags: - - stop-instances - block: - - name: Stop gcloud instances - google.cloud.gcp_compute_instance: - name: "{{ __instances_stop_async_item.name }}" - status: TERMINATED - zone: "{{ gcp_zone }}" - labels: "{{ __create_instances_job_async_item.labels | default(omit) }}" - tags: "{{ __create_instances_job_async_item.tags | default(omit) }}" - <<: *gcp_login - loop: "{{ gcp_instances }}" - loop_control: - loop_var: __instances_stop_async_item - label: "{{ __instances_stop_async_item.name }}" - async: 1000 - poll: 0 - register: __instances_stop_async - changed_when: not __instances_stop_async.changed - - - name: "Async | Stop gcloud instances" - ansible.builtin.async_status: - jid: "{{ __instances_stop_async_result_item.ansible_job_id }}" - retries: 120 - delay: 1 - loop: "{{ __instances_stop_async.results }}" - loop_control: - loop_var: __instances_stop_async_result_item - label: "{{ __instances_stop_async_result_item.__instances_stop_async_item.name }}" - until: __instances_stop_async_result.finished - register: __instances_stop_async_result - when: __instances_stop_async_result_item.ansible_job_id is defined - - - name: Remove current gcloud images - when: (gcp_instances is defined) and (gcp_instances | length > 0) - tags: - - remove-images - block: - - 
name: Remove current gcloud images - google.cloud.gcp_compute_image: - name: "{{ __images_remove_async_item.name }}-image" - state: absent - <<: *gcp_login - loop: "{{ gcp_instances }}" - loop_control: - loop_var: __images_remove_async_item - label: "{{ __images_remove_async_item.name }}" - async: 1000 - poll: 0 - register: __images_remove_async - changed_when: not __images_remove_async.changed + - name: Stop GCP instances + vars: + gcp_resources_instances_status: TERMINATED + ansible.builtin.include_role: + name: gcp_resources + tasks_from: manage_gcp_instances.yml - - name: "Async | Remove current gcloud images" - ansible.builtin.async_status: - jid: "{{ __images_remove_async_result_item.ansible_job_id }}" - retries: 175 - delay: 1 - loop: "{{ __images_remove_async.results }}" - loop_control: - loop_var: __images_remove_async_result_item - label: "{{ __images_remove_async_result_item.__images_remove_async_item.name }}" - until: __images_remove_async_result.finished - register: __images_remove_async_result - when: __images_remove_async_result_item.ansible_job_id is defined - rescue: - - name: Print error message - ansible.builtin.fail: - msg: | - "Rescue - Unable to remove gcloud images." - "{{ __images_remove_async_result }}" - notify: - - remove instances - - - name: Create new Instruqt images - notify: - - remove instances - block: - - name: Create gcloud images - google.cloud.gcp_compute_image: - name: "{{ __images_create_async_item.name }}-image" - state: present - source_disk: - selfLink: "https://www.googleapis.com/compute/v1/projects/{{ gcp_project }}/zones/{{ gcp_zone }}/disks/{{ __images_create_async_item.name }}" # yamllint disable-line rule:line-length - labels: - role: "{{ track_slug }}" - description: "{{ track_slug }} - {{ __images_create_async_item.name }} image." - <<: *gcp_login - loop: "{{ gcp_instances }}" - loop_control: - loop_var: __images_create_async_item - label: "{{ __images_create_async_item.name }}" - async: 1000 - poll: 0 - register: __images_create_async - changed_when: not __images_create_async.changed + - name: Remove current GCP images + vars: + gcp_resources_images_state: absent + ansible.builtin.include_role: + name: gcp_resources + tasks_from: manage_gcp_images.yml - - name: "Async | Create new Instruqt gcloud images" - ansible.builtin.async_status: - jid: "{{ __images_create_async_result_item.ansible_job_id }}" - retries: 40 - delay: 5 - loop: "{{ __images_create_async.results }}" - loop_control: - loop_var: __images_create_async_result_item - label: "{{ __images_create_async_result_item.__images_create_async_item.name }}" - until: __images_create_async_result.finished - register: __images_create_async_result - when: __images_create_async_result_item.ansible_job_id is defined - rescue: - - name: Print error message - ansible.builtin.fail: - msg: | - "Rescue - Unable to remove gcloud images." 
- "{{ __images_create_async_result }}" - notify: - - remove instances + - name: Create new GCP images + vars: + gcp_resources_images_state: present + ansible.builtin.include_role: + name: gcp_resources + tasks_from: manage_gcp_images.yml - handlers: - name: Remove GCP instances - listen: remove instances - google.cloud.gcp_compute_instance: - name: "{{ __remove_instances_job_async_item.name }}" - state: absent - zone: "{{ gcp_zone }}" - labels: "{{ __create_instances_job_async_item.labels | default(omit) }}" - tags: "{{ __create_instances_job_async_item.tags | default(omit) }}" - <<: *gcp_login - loop: "{{ gcp_instances }}" - loop_control: - loop_var: __remove_instances_job_async_item - label: "{{ __remove_instances_job_async_item.name }}" - async: 1000 - poll: 0 - register: __remove_instances_job_async - changed_when: not __remove_instances_job_async.changed - when: (gcp_instances is defined) and (gcp_instances | length > 0) - tags: - - remove-instances - - - name: "Remove GCP instances | Wait for finish the instance deletion" - listen: remove instances - ansible.builtin.async_status: - jid: "{{ __remove_instances_job_async_result_item.ansible_job_id }}" - retries: 35 - delay: 5 - loop: "{{ __remove_instances_job_async.results }}" - loop_control: - loop_var: __remove_instances_job_async_result_item - label: "{{ __remove_instances_job_async_result_item.__remove_instances_job_async_item.name }}" - until: __remove_instances_job_async_result.finished - register: __remove_instances_job_async_result - when: __remove_instances_job_async_result_item.ansible_job_id is defined - tags: - - remove-instances + vars: + gcp_resources_instances_state: absent + ansible.builtin.include_role: + name: gcp_resources + tasks_from: manage_gcp_instances.yml diff --git a/images/ansible/roles/gcp_resources/README.md b/images/ansible/roles/gcp_resources/README.md new file mode 100644 index 00000000..d6586e4c --- /dev/null +++ b/images/ansible/roles/gcp_resources/README.md @@ -0,0 +1,49 @@ +Role Name +========= + +Manage GCP resources + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: localhost + vars: + gcp_resources_instances: + - name: "gcp-instance-name" + network_interfaces: + - access_configs: + - name: External NAT + type: ONE_TO_ONE_NAT + disks: + - auto_delete: true + boot: true + device_name: "gcp-instance-name-disk" + initialize_params: + source_image: projects/rhel-cloud/global/images/family/rhel-9 + disk_size_gb: 40 + disk_type: pd-balanced + labels: + label_name: label1 + machine_type: n2-standard-2 + metadata: + ssh-keys: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + tags: + items: + - https-server + tasks: + - name: Provision GCP instance + ansible.builtin.include_role: + name: gcp_resources + tasks_from: manage_gcp_instances.yml + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/images/ansible/roles/gcp_resources/defaults/main.yml b/images/ansible/roles/gcp_resources/defaults/main.yml new file mode 100644 index 00000000..10432783 --- /dev/null +++ b/images/ansible/roles/gcp_resources/defaults/main.yml @@ -0,0 +1,8 @@ +--- +gcp_resources_zone: "{{ lookup('ansible.builtin.env', 'GCP_ZONE', default='') }}" +gcp_resources_project: "{{ lookup('ansible.builtin.env', 'GCP_PROJECT', default='') }}" +gcp_resources_instances: [] +gcp_resources_instances_state: present +gcp_resources_instances_status: RUNNING +gcp_resources_images: [] +gcp_resources_images_state: present diff --git a/images/ansible/roles/gcp_resources/handlers/main.yml b/images/ansible/roles/gcp_resources/handlers/main.yml new file mode 100644 index 00000000..605b9f97 --- /dev/null +++ b/images/ansible/roles/gcp_resources/handlers/main.yml @@ -0,0 +1,31 @@ +--- +- name: Remove GCP instances + listen: remove instances + google.cloud.gcp_compute_instance: + name: "{{ __remove_instances_job_async_item.name }}" + state: absent + zone: "{{ gcp_resources_zone }}" + labels: "{{ __remove_instances_job_async_item.labels | default(omit) }}" + tags: "{{ __remove_instances_job_async_item.tags | default(omit) }}" + loop: "{{ gcp_resources_instances }}" + loop_control: + loop_var: __remove_instances_job_async_item + label: "{{ __remove_instances_job_async_item.name }}" + async: 1000 + poll: 0 + register: __remove_instances_job_async + changed_when: not __remove_instances_job_async.changed + +- name: Remove GCP instances | Async + listen: remove instances + ansible.builtin.async_status: + jid: "{{ __remove_instances_job_async_result_item.ansible_job_id }}" + retries: 180 + delay: 1 + loop: "{{ __remove_instances_job_async.results }}" + loop_control: + loop_var: __remove_instances_job_async_result_item + label: "{{ __remove_instances_job_async_result_item.__remove_instances_job_async_item.name }}" + until: __remove_instances_job_async_result.finished + register: __remove_instances_job_async_result + when: __remove_instances_job_async_result_item.ansible_job_id is defined diff --git a/images/ansible/roles/gcp_resources/meta/argument_specs.yml b/images/ansible/roles/gcp_resources/meta/argument_specs.yml new file mode 100644 index 00000000..ebf87df5 --- /dev/null +++ b/images/ansible/roles/gcp_resources/meta/argument_specs.yml @@ -0,0 +1,45 @@ +--- +argument_specs: + # roles/gcp_resources/tasks/manage_gcp_instances.yml entry point + manage_gcp_instances: + short_description: Manage GCP instances + options: + gcp_resources_zone: + type: "str" + required: true + default: "GCP_ZONE" + description: "The GCP zone to provision resources. + Defaults to 'GCP_ZONE' environment variable." + gcp_resources_project: + type: "str" + required: true + default: "GCP_PROJECT" + description: "The GCP Project name to provision resources. + Defaults to 'GCP_PROJECT' environment variable." + gcp_resources_instances_state: + type: "str" + required: true + default: "present" + description: "State of the GCP instance." + gcp_resources_instances_status: + type: "str" + required: true + default: RUNNING + description: "The status of the GCP instance." + gcp_resources_instances: + type: list + required: true + description: The details of the GCP instances. Please refer to the README for an example. + # roles/gcp_resources/tasks/manage_gcp_images.yml entry point + manage_gcp_images: + short_description: Manage GCP images + options: + gcp_resources_images: + type: list + required: true + default: [] + description: The details of the GCP images.
+ gcp_resources_images_state: + type: str + required: true + description: GCP image state. diff --git a/images/ansible/roles/gcp_resources/meta/main.yml b/images/ansible/roles/gcp_resources/meta/main.yml new file mode 100644 index 00000000..232d29f4 --- /dev/null +++ b/images/ansible/roles/gcp_resources/meta/main.yml @@ -0,0 +1,18 @@ +galaxy_info: + author: Craig Brandt + description: Manage Google Cloud resources + + issue_tracker_url: http://github.com/ansible/instruqt/issues + + license: MIT + + min_ansible_version: "2.13" + + platforms: + - name: EL + versions: + - "9" + + galaxy_tags: [] + +dependencies: [] diff --git a/images/ansible/roles/gcp_resources/tasks/main.yml b/images/ansible/roles/gcp_resources/tasks/main.yml new file mode 100644 index 00000000..5ec282f2 --- /dev/null +++ b/images/ansible/roles/gcp_resources/tasks/main.yml @@ -0,0 +1,2 @@ +--- +# tasks file for gcp_resources diff --git a/images/ansible/roles/gcp_resources/tasks/manage_gcp_images.yml b/images/ansible/roles/gcp_resources/tasks/manage_gcp_images.yml new file mode 100644 index 00000000..ba5d06bc --- /dev/null +++ b/images/ansible/roles/gcp_resources/tasks/manage_gcp_images.yml @@ -0,0 +1,57 @@ +--- +- name: Manage Google Compute Images + block: + - name: Manage GCP Images - {{ gcp_resources_images_state }} + google.cloud.gcp_compute_image: + auth_kind: "{{ __manage_images_async_item.auth_kind | default(omit) }}" + description: "{{ __manage_images_async_item.description | default(omit) }}" + disk_size_gb: "{{ __manage_images_async_item.disk_size_gb | default(omit) }}" + family: "{{ __manage_images_async_item.family | default(omit) }}" + guest_os_features: "{{ __manage_images_async_item.guest_os_features | default(omit) }}" + image_encryption_key: "{{ __manage_images_async_item.image_encryption_key | default(omit) }}" + licenses: "{{ __manage_images_async_item.licenses | default(omit) }}" + name: "{{ __manage_images_async_item.name }}" + project: "{{ gcp_resources_project | default(omit) }}" + raw_disk: "{{ __manage_images_async_item.raw_disk | default(omit) }}" + source_disk: "{{ __manage_images_async_item.source_disk | default(omit) }}" + source_disk_encryption_key: "{{ __manage_images_async_item.source_disk_encryption_key | default(omit) }}" + source_disk_id: "{{ __manage_images_async_item.source_disk_id | default(omit) }}" + source_image: "{{ __manage_images_async_item.source_image | default(omit) }}" + source_snapshot: "{{ __manage_images_async_item.source_snapshot | default(omit) }}" + state: "{{ __manage_images_async_item.state | default(gcp_resources_images_state) }}" + labels: "{{ __manage_images_async_item.labels | default(omit) }}" + source_type: "{{ __manage_images_async_item.source_type | default(omit) }}" + service_account_contents: "{{ __manage_images_async_item.service_account_contents | default(omit) }}" + service_account_file: "{{ __manage_images_async_item.service_account_file | default(omit) }}" + service_account_email: "{{ __manage_images_async_item.service_account_email | default(omit) }}" + access_token: "{{ __manage_images_async_item.access_token | default(omit) }}" + scopes: "{{ __manage_images_async_item.scopes | default(omit) }}" + loop: "{{ gcp_resources_images }}" + loop_control: + loop_var: __manage_images_async_item + label: "{{ __manage_images_async_item.name }}" + async: 1000 + poll: 0 + register: __manage_images_async + changed_when: not __manage_images_async.changed + + - name: Manage GCP Images | Async + ansible.builtin.async_status: + jid: "{{
__manage_images_async_result_item.ansible_job_id }}" + retries: 180 + delay: 1 + loop: "{{ __manage_images_async.results }}" + loop_control: + loop_var: __manage_images_async_result_item + label: "{{ __manage_images_async_result_item.__manage_images_async_item.name }}" + until: __manage_images_async_result.finished + register: __manage_images_async_result + when: __manage_images_async_result_item.ansible_job_id is defined + rescue: + - name: Print error message + ansible.builtin.fail: + msg: | + "Rescue - Unable to remove gcloud images." + "{{ __manage_images_async_result }}" + notify: + - remove instances diff --git a/images/ansible/roles/gcp_resources/tasks/manage_gcp_instances.yml b/images/ansible/roles/gcp_resources/tasks/manage_gcp_instances.yml new file mode 100644 index 00000000..1ef11b58 --- /dev/null +++ b/images/ansible/roles/gcp_resources/tasks/manage_gcp_instances.yml @@ -0,0 +1,60 @@ +--- +- name: Manage Google Cloud instances + block: + - name: Manage Google Cloud instances - {{ gcp_resources_instances_state }} {{ gcp_resources_instances_status }} # noqa name[template] + google.cloud.gcp_compute_instance: + auth_kind: "{{ __manage_instances_job_async_item.auth_kind | default(omit) }}" + name: "{{ __manage_instances_job_async_item.name }}" + machine_type: "{{ __manage_instances_job_async_item.machine_type }}" + labels: "{{ __manage_instances_job_async_item.labels | default(omit) }}" + tags: "{{ __manage_instances_job_async_item.tags | default(omit) }}" + state: "{{ __manage_instances_job_async_item.state | default(gcp_resources_instances_state) }}" + zone: "{{ __manage_instances_job_async_item.zone | default(gcp_resources_zone) }}" + network_interfaces: "{{ __manage_instances_job_async_item.network_interfaces | default(omit) }}" + disks: "{{ __manage_instances_job_async_item.disks }}" + metadata: "{{ __manage_instances_job_async_item.metadata | default(omit) }}" + can_ip_forward: "{{ __manage_instances_job_async_item.can_ip_forward | default(omit) }}" + guest_accelerators: "{{ __manage_instances_job_async_item.guest_accelerators | default(omit) }}" + min_cpu_platform: "{{ __manage_instances_job_async_item.min_cpu_platform | default(omit) }}" + project: "{{ __manage_instances_job_async_item.project | default(gcp_resources_project) }}" + service_account_contents: "{{ gcp_service_account_contents | default(omit) }}" + service_account_email: "{{ gcp_service_account_email | default(omit) }}" + service_account_file: "{{ gcp_service_account_file | default(omit) }}" + scopes: "{{ __manage_instances_job_async_item.scopes | default(omit) }}" + service_accounts: "{{ gcp_service_accounts | default(omit) }}" + deletion_protection: "{{ __manage_instances_job_async_item.deletion_protection | default(omit) }}" + hostname: "{{ __manage_instances_job_async_item.hostname | default(omit) }}" + scheduling: "{{ __manage_instances_job_async_item.scheduling | default(omit) }}" + shielded_instance_config: "{{ __manage_instances_job_async_item.shielded_instance_config | default(omit) }}" + confidential_instance_config: "{{ __manage_instances_job_async_item.confidential_instance_config | default(omit) }}" + status: "{{ __manage_instances_job_async_item.status | default(gcp_resources_instances_status) }}" + access_token: "{{ __manage_instances_job_async_item.access_token | default(omit) }}" + loop: "{{ gcp_resources_instances }}" + loop_control: + loop_var: __manage_instances_job_async_item + label: "{{ __manage_instances_job_async_item.name }}" + async: 1000 + poll: 0 + register: 
__manage_instances_job_async + changed_when: not __manage_instances_job_async.changed + + - name: Manage Google Cloud instances | Async + ansible.builtin.async_status: + jid: "{{ __manage_instances_job_async_result_item.ansible_job_id }}" + retries: 120 + delay: 1 + loop: "{{ __manage_instances_job_async.results }}" + loop_control: + loop_var: __manage_instances_job_async_result_item + label: "{{ __manage_instances_job_async_result_item.__manage_instances_job_async_item.name }}" + until: __manage_instances_job_async_result.finished + register: __manage_instances_job_async_result + when: __manage_instances_job_async_result_item.ansible_job_id is defined + rescue: + - name: Print error message + ansible.builtin.fail: + msg: | + "Rescue - An error occurred managing GCP instances. + "{{ __manage_instances_job_async_result }}" + notify: + - remove instances diff --git a/images/ansible/roles/gcp_resources/tests/inventory b/images/ansible/roles/gcp_resources/tests/inventory new file mode 100644 index 00000000..878877b0 --- /dev/null +++ b/images/ansible/roles/gcp_resources/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/images/ansible/roles/gcp_resources/tests/test.yml b/images/ansible/roles/gcp_resources/tests/test.yml new file mode 100644 index 00000000..bedb67fe --- /dev/null +++ b/images/ansible/roles/gcp_resources/tests/test.yml @@ -0,0 +1,6 @@ +--- +- name: Test gcp_resources + hosts: localhost + become: false + roles: + - gcp_resources diff --git a/images/ansible/vars/lightspeed-101/vars.yml b/images/ansible/vars/lightspeed-101/vars.yml index faa07b56..365892bf 100644 --- a/images/ansible/vars/lightspeed-101/vars.yml +++ b/images/ansible/vars/lightspeed-101/vars.yml @@ -14,40 +14,55 @@ ansible_user: "{{ admin_username }}" aap_dir: /home/{{ ansible_user }}/aap_install controller_install_command: ANSIBLE_BECOME_METHOD='sudo' ANSIBLE_BECOME=True set -o pipefail && ./setup.sh -e registry_username='{{ registry_username }}' -e registry_password='{{ registry_password }}' # noqa yaml[line-length] offline_token: "{{ lookup('ansible.builtin.env', 'REDHAT_OFFLINE_TOKEN') }}" -provided_sha_value: d3ecb5932c9f154ad279dd2f28ce3f711f1447b54ee92f50698a601f853644c3 # RHEL 9 2.4 +provided_sha_value: "{{ lookup('ansible.builtin.env', 'AAP_SHA_VALUE', default='7c4509b3436c7423a60a65815493b3d66162acd09dbca131a9b5edad9e319a40') }}" # noqa yaml[line-length] # RHEL 9 2.4 # GCP vars -gcp_zone: us-central1-a -gcp_project: "{{ lookup('ansible.builtin.env', 'GCP_PROJECT', default='red-hat-mbu') }}" +gcp_resources_zone: us-central1-a +gcp_resources_project: "{{ lookup('ansible.builtin.env', 'GCP_PROJECT', default='red-hat-mbu') }}" gcp_service_account_file: "{{ lookup('ansible.builtin.env', 'GCP_SERVICE_ACCOUNT_FILE') }}" gcp_service_account: "{{ lookup('ansible.builtin.env', 'GCP_SERVICE_ACCOUNT') }}" -gcp_instances: +gcp_resources_instances: - name: "{{ controller_hostname }}" - # source_image: automation-controller + labels: + role: "{{ track_slug }}" + mesh-type: controller + machine_type: n2-standard-4 network_interfaces: - access_configs: - - name: External NAT # yamllint disable-line rule:indentation - type: ONE_TO_ONE_NAT + - name: External NAT # noqa yaml[indentation] + type: ONE_TO_ONE_NAT disks: - auto_delete: true boot: true - device_name: "{{ track_slug }}-controller" + device_name: "{{ controller_hostname }}" initialize_params: - # source_image: "projects/{{ gcp_project }}/global/images/lightspeed-101-controller-image" source_image: projects/rhel-cloud/global/images/family/rhel-9 - # 
source_image: "projects/{{ gcp_project }}/global/images/rhel9" - disk_size_gb: 40 + disk_size_gb: 20 disk_type: pd-balanced + tags: + items: + - https-server + metadata: + ssh-keys: "{{ lookup('file', '~/.ssh/instruqt/instruqt_provision.pub') }}" + - name: "{{ track_slug }}-rhel-node" labels: role: "{{ track_slug }}" - mesh-type: controller - location: raleigh - machine_type: n2-standard-4 + mesh-type: rhel-node + machine_type: e2-standard-2 + network_interfaces: + - access_configs: + - name: External NAT # noqa yaml[indentation] + type: ONE_TO_ONE_NAT + disks: + - auto_delete: true + boot: true + device_name: "{{ track_slug }}-rhel-node" + initialize_params: + source_image: projects/rhel-cloud/global/images/family/rhel-9 + disk_size_gb: 20 + disk_type: pd-balanced metadata: ssh-keys: "{{ lookup('file', '~/.ssh/instruqt/instruqt_provision.pub') }}" - tags: - items: - - https-server gcp_inventory_host_vars: ansible_user: "{{ admin_username }}" ansible_ssh_private_key_file: ~/.ssh/instruqt/instruqt_provision @@ -55,11 +70,27 @@ gcp_inventory_host_vars: ansible_python_interpreter: /usr/bin/python3 ansible_ssh_pipelining: true +gcp_resources_images: + - name: "{{ controller_hostname }}-image" + source_disk: + selfLink: "https://www.googleapis.com/compute/v1/projects/{{ gcp_resources_project }}/zones/\ + {{ gcp_resources_zone }}/disks/{{ controller_hostname }}" + labels: + role: "{{ track_slug }}" + description: "{{ track_slug }} - {{ controller_hostname }} image." + - name: "{{ track_slug }}-rhel-node-image" + source_disk: + selfLink: "https://www.googleapis.com/compute/v1/projects/{{ gcp_resources_project }}/zones/\ + {{ gcp_resources_zone }}/disks/{{ track_slug }}-rhel-node" + labels: + role: "{{ track_slug }}" + description: "{{ track_slug }} - {{ track_slug }}-rhel-node image." 
+ # Base controller custom setup vars admin_username: "{{ vault_admin_username }}" admin_password: "{{ vault_admin_password }}" -redhat_username: "{{ lookup('ansible.builtin.env', 'REDHAT_USERNAME', default='') }}" -redhat_password: "{{ lookup('ansible.builtin.env', 'REDHAT_PASSWORD', default='') }}" +redhat_username: "{{ lookup('ansible.builtin.env', 'REDHAT_USERNAME') }}" +redhat_password: "{{ lookup('ansible.builtin.env', 'REDHAT_PASSWORD') }}" registry_username: "{{ redhat_username }}" registry_password: "{{ redhat_password }}" @@ -80,18 +111,10 @@ gitea_app_url: "{{ gitea_protocol }}://{{ gitea_hostname }}:{{ gitea_http_port } gitea_repo_name: acme_corp gitea_admin_username: "{{ vault_gitea_admin_username }}" gitea_admin_password: "{{ vault_gitea_admin_password }}" -gitea_clone_address: "https://github.com/craig-br/lightspeed-demos" +gitea_clone_address: "https://github.com/ansible/ansible-lightspeed-demos" vscode_settings_file: "{{ playbook_dir }}/templates/{{ track_slug }}/settings.json" -# code_server role vars -# codeserver_username: "{{ student_username }}" -# codeserver_password: "{{ student_password }}" -# codeserver_prebuild: false -# codeserver_authentication: false -# codeserver_extensions: -# - name: redhat.ansible - # Lab vars lab: credential: @@ -115,20 +138,19 @@ lab: name: ACME Corp Repo repo: "{{ gitea_app_url }}/{{ student_username }}/acme_corp.git" # Temp - # branch: devel - branch: main + branch: devel + # branch: main inventory: name: ACME Corp DC description: ACME Corp Data center navigator_execution_environment: name: ACME Corp execution environment image: quay.io/acme_corp/lightspeed-101_ee - # image: quay.io/acme_corp/summit_ee s3: # Dev - # bucket_name: tmm-instruqt-content.demoredhat.com.dev + bucket_name: tmm-instruqt-content.demoredhat.com.dev # Prod - bucket_name: tmm-instruqt-content.demoredhat.com.private + # bucket_name: tmm-instruqt-content.demoredhat.com.private workflow_name: Deploy App Workflow # Lab containers @@ -177,6 +199,8 @@ gnome_gsettings: - org.gnome.shell.extensions.desktop-icons show-trash false - org.gnome.desktop.wm.preferences button-layout ":minimize,maximize,close" +controller_pip_packages: + - selenium controller_os_packages: - ansible-core - ansible-navigator @@ -187,10 +211,6 @@ controller_os_packages: - google-noto-emoji-color-fonts - google-noto-emoji-fonts -# controller_pip_packages: -# - ansible-navigator -# - ansible-lint - controller_collections: - redhat_cop.controller_configuration - awx.awx @@ -313,6 +333,7 @@ controller_execution_environments: state: present pull: never +# Must have scm_update_on_launch: true for lab controller_projects: - name: "{{ lab.project.name }}" organization: "{{ lab.organization }}" @@ -332,7 +353,6 @@ controller_inventories: description: "{{ lab.inventory.name }}" variables: ansible_ssh_private_key_file: ~/.ssh/instruqt_lab - ansible_host: lightspeed-101-controller ansible_user: rhel ansible_python_interpreter: /usr/bin/python3 ansible_ssh_extra_args: '-o StrictHostKeyChecking=no -o ControlMaster=auto -o ControlPersist=60s' @@ -340,23 +360,40 @@ controller_inventories: controller_hosts: - name: controller.acme.example.com + variables: + ansible_host: "{{ track_slug }}-controller" inventory: "{{ lab.inventory.name }}" enabled: true - name: app-01.acme.example.com inventory: "{{ lab.inventory.name }}" enabled: true + variables: + ansible_host: "{{ track_slug }}-rhel-node1" + - name: rhel-01.acme.example.com + inventory: "{{ lab.inventory.name }}" + enabled: true + variables: + ansible_host: "{{ 
track_slug }}-rhel-node1" - name: app-02.acme.example.com inventory: "{{ lab.inventory.name }}" enabled: true + variables: + ansible_host: "{{ track_slug }}-rhel-node1" - name: monitor-01.acme.example.com inventory: "{{ lab.inventory.name }}" enabled: true + variables: + ansible_host: "{{ track_slug }}-rhel-node1" - name: db-01.acme.example.com inventory: "{{ lab.inventory.name }}" enabled: true + variables: + ansible_host: "{{ track_slug }}-rhel-node1" - name: web-01.acme.example.com inventory: "{{ lab.inventory.name }}" enabled: true + variables: + ansible_host: "{{ track_slug }}-rhel-node1" controller_groups: - name: appservers @@ -374,7 +411,7 @@ controller_groups: description: ACME Corp monitoring servers inventory: "{{ lab.inventory.name }}" hosts: - - monitor-01.acme.example.com + - rhel-01.acme.example.com - name: databases description: ACME Corp monitoring servers inventory: "{{ lab.inventory.name }}" @@ -392,19 +429,19 @@ controller_groups: - controller.acme.example.com # controller_templates: -# - name: Prepare AWS demo -# state: present -# job_type: run -# playbook: "playbooks/cloud/aws/prepare_aws_environment.yml" -# execution_environment: "{{ lab.navigator_execution_environment.name }}" -# organization: "{{ lab.organization }}" -# inventory: "{{ lab.inventory.name }}" -# verbosity: 0 -# credentials: -# - "{{ lab.credential.aws.name }}" -# project: "{{ lab.project.name }}" -# extra_vars: -# _SANDBOX_ID: "{{ lookup('ansible.builtin.env', '_SANDBOX_ID') }}" # Updated in Instruqt lifecycle script. + # - name: Prepare AWS demo + # state: present + # job_type: run + # playbook: playbooks/cloud/aws/prepare_aws_environment.yml + # execution_environment: "{{ lab.navigator_execution_environment.name }}" + # organization: "{{ lab.organization }}" + # inventory: "{{ lab.inventory.name }}" + # verbosity: 0 + # credentials: + # - "{{ lab.credential.aws.name }}" + # project: "{{ lab.project.name }}" + # extra_vars: + # _SANDBOX_ID: "{{ lookup('ansible.builtin.env', '_SANDBOX_ID') }}" # Updated in Instruqt lifecycle script. # - name: Prepare Azure demo # state: present # job_type: run