diff --git a/roles/afm_cos_configure/tasks/afm_configure.yml b/roles/afm_cos_configure/tasks/afm_configure.yml
index 30a8ae94..8f0afe87 100644
--- a/roles/afm_cos_configure/tasks/afm_configure.yml
+++ b/roles/afm_cos_configure/tasks/afm_configure.yml
@@ -22,30 +22,57 @@
   when:
     - scale_existing_bucket.stdout_lines is defined

+- name: create | Find existing Bucket(s)
+  shell: mmafmcoskeys all get --report | grep -v HEADER | cut -d '=' -f 1 | uniq
+  register: scale_existing_bucket_list
+  changed_when: false
+  failed_when: false
+
+- debug:
+    msg: "{{scale_existing_bucket_list.stdout_lines}}"
+  changed_when: false
+  failed_when: false
+  when:
+    - scale_existing_bucket_list.stdout_lines is defined
+
 - name: create | Initialize
   set_fact:
     extra_option_flag: ""
+    all_existing_bucket: []
+
+- set_fact:
+    all_existing_bucket: "{{ scale_existing_bucket.stdout_lines + scale_existing_bucket_list.stdout_lines }}"
+
+- debug:
+    msg: "{{all_existing_bucket}}"

 - name: create | Set existing bucket if it is defined
   set_fact:
     extra_option_flag: "{{ extra_option_flag }} --bucket {{ item.bucket }}"
   when:
-    - item.bucket in scale_existing_bucket.stdout_lines
+    - item.bucket in all_existing_bucket
     - item.bucket is defined

 - name: create | Set new bucket if it is defined
   set_fact:
     extra_option_flag: "{{ extra_option_flag }} --new-bucket {{ item.bucket }}"
   when:
-    - item.bucket not in scale_existing_bucket.stdout_lines
+    - item.bucket not in all_existing_bucket
     - item.bucket is defined

 - name: create | Set mode if it is defined
   set_fact:
     extra_option_flag: "{{ extra_option_flag }} --mode {{ item.mode | default('sw') }}"
+
+- name: create | Set extra parameter config
+  set_fact:
+    extra_option_flag: "{{ extra_option_flag }} --gcs"
+  when: "scale_afm_cos_config_params is defined and
+         (scale_afm_cos_config_params.is_gcs is defined and scale_afm_cos_config_params.is_gcs|bool)"

 - name: configure | Create a AFM cos relationship with filesets
-  command: "{{ scale_command_path }}mmafmcosconfig {{ item.filesystem }} {{ item.fileset }} --endpoint {{ item.endpoint }} {{ extra_option_flag }}"
+  command: "{{ scale_command_path }}mmafmcosconfig {{ item.filesystem }} {{ item.fileset }} --endpoint {{ item.endpoint }} {{ extra_option_flag }} --directory-object"
   register: scale_afm_cos_define
   failed_when: scale_afm_cos_define.rc != 0
   when:
diff --git a/roles/afm_cos_configure/tasks/afm_gateway.yml b/roles/afm_cos_configure/tasks/afm_gateway.yml
index c973ffc1..662931be 100644
--- a/roles/afm_cos_configure/tasks/afm_gateway.yml
+++ b/roles/afm_cos_configure/tasks/afm_gateway.yml
@@ -7,7 +7,9 @@
   add_host:
     name: "{{ item }}"
     groups: scale_gateway_nodes
-  when: hostvars[item].scale_cluster_gateway | bool
+  when:
+    - hostvars[item].scale_cluster_gateway is defined
+    - hostvars[item].scale_cluster_gateway | bool
   with_items: "{{ ansible_play_hosts }}"
   changed_when: false

@@ -15,17 +17,38 @@
   vars:
     scale_gateway_node: "{{ groups['scale_gateway_nodes'] | list }}"
   command: "{{ scale_command_path }}mmchlicense server --accept -N {{ scale_gateway_node | join(',') }}"
-  when: groups['scale_gateway_nodes'] | list | length > 0
+  when:
+    - groups['scale_gateway_nodes'] is defined
+    - groups['scale_gateway_nodes'] | list | length > 0
   run_once: true

 - name: configure | Enabling AFM gateway
   vars:
     scale_gateway_node: "{{ groups['scale_gateway_nodes'] | list }}"
   command: "{{ scale_command_path }}mmchnode -N {{ scale_gateway_node | join(',') }} --gateway"
-  when: groups['scale_gateway_nodes'] | list | length > 0
+  when:
+    - groups['scale_gateway_nodes'] is defined
+    - groups['scale_gateway_nodes'] | list | length > 0
   register: result_gateway
   run_once: true

 - debug:
     msg: "{{ result_gateway.cmd }}"
   when: result_gateway.cmd is defined
+
+- name: storage | Mount filesystem(s) on AFM gateway nodes
+  vars:
+    scale_gateway_node: "{{ groups['scale_gateway_nodes'] | list }}"
+  command: "{{ scale_command_path }}mmmount all -N {{ scale_gateway_node | join(',') }}"
+  changed_when: false
+  run_once: true
+  failed_when: false
+  ignore_errors: yes
+  register: scale_mount_fs_cloud
+  when:
+    - groups['scale_gateway_nodes'] is defined
+    - groups['scale_gateway_nodes'] | list | length > 0
+
+- debug:
+    msg: "{{ scale_mount_fs_cloud }}"
+  when: scale_mount_fs_cloud is defined
diff --git a/roles/ces_common/tasks/configure.yml b/roles/ces_common/tasks/configure.yml
index aa15efcd..3dfba1d3 100644
--- a/roles/ces_common/tasks/configure.yml
+++ b/roles/ces_common/tasks/configure.yml
@@ -134,6 +134,29 @@
         ( scale_service_status.rc == 0 )
   run_once: true

+- name: configure | Check if S3 is running
+  shell:
+    cmd: "{{ scale_command_path }}mmces service list|grep S3"
+  register: scale_service_status
+  when: inventory_hostname in scale_protocol_node_list
+  ignore_errors: true
+  failed_when: false
+  run_once: true
+
+- name: configure | Add S3 service to the list
+  set_fact:
+    scale_service_list: "{{ scale_service_list + [scale_service_status.stderr|regex_search('S3')] }}"
+  when: (inventory_hostname in scale_protocol_node_list) and
+        ( scale_service_status.rc > 0 )
+  run_once: true
+
+- name: configure | Add S3 service to the list
+  set_fact:
+    scale_service_list: "{{ scale_service_list + ['S3'] }}"
+  when: (inventory_hostname in scale_protocol_node_list) and
+        ( scale_service_status.rc == 0 )
+  run_once: true
+
 - import_role:
     name: ibm.spectrum_scale.nfs_install
   when: scale_ces_disabled_nodes|length > 0 and 'NFS' in scale_service_list
@@ -146,6 +169,10 @@
     name: ibm.spectrum_scale.obj_install
   when: scale_ces_disabled_nodes|length > 0 and 'OBJ' in scale_service_list

+- import_role:
+    name: ibm.spectrum_scale.s3_install
+  when: scale_ces_disabled_nodes|length > 0 and 'S3' in scale_service_list
+
 - name: configure | Prepare ces nodes string
   set_fact:
     scale_ces_nodes: "{{ scale_ces_nodes + ',' + item|string }}"
diff --git a/roles/ces_common/tasks/main.yml b/roles/ces_common/tasks/main.yml
index f741dad9..ce888e87 100644
--- a/roles/ces_common/tasks/main.yml
+++ b/roles/ces_common/tasks/main.yml
@@ -5,7 +5,8 @@
          (scale_protocols.nfs is defined and scale_protocols.nfs|bool) or
          (scale_protocols.smb is defined and scale_protocols.smb|bool) or
          (scale_protocols.object is defined and scale_protocols.object|bool) or
-         (scale_protocols.hdfs is defined and scale_protocols.hdfs|bool)"
+         (scale_protocols.hdfs is defined and scale_protocols.hdfs|bool) or
+         (scale_protocols.s3 is defined and scale_protocols.s3|bool)"
   tags: always

 - include_tasks: configure.yml
@@ -13,5 +14,6 @@
          (scale_protocols.nfs is defined and scale_protocols.nfs|bool) or
          (scale_protocols.smb is defined and scale_protocols.smb|bool) or
          (scale_protocols.object is defined and scale_protocols.object|bool) or
-         (scale_protocols.hdfs is defined and scale_protocols.hdfs|bool)"
+         (scale_protocols.hdfs is defined and scale_protocols.hdfs|bool) or
+         (scale_protocols.s3 is defined and scale_protocols.s3|bool)"
   tags: configure
diff --git a/roles/core_upgrade/tasks/install_repository.yml b/roles/core_upgrade/tasks/install_repository.yml
index f96e394a..af762617 100644
--- a/roles/core_upgrade/tasks/install_repository.yml
+++ b/roles/core_upgrade/tasks/install_repository.yml
@@ -7,12 +7,17 @@
 - name: Initialize
   set_fact:
     is_scale_java_pkg_installed: false
+    base_repo_url: "gpfs_rpms/"
+
+- set_fact:
+    base_repo_url: ""
+  when: scale_baseurl is defined

 - name: upgrade | Configure GPFS YUM repository
   yum_repository:
     name: spectrum-scale-gpfs
     description: IBM Spectrum Scale (GPFS)
-    baseurl: "{{ scale_install_repository_url }}gpfs_rpms/"
+    baseurl: "{{ scale_install_repository_url }}{{ base_repo_url }}"
     gpgcheck: "{{ scale_install_gpgcheck }}"
     repo_gpgcheck: no
     sslverify: no
@@ -22,6 +27,7 @@
     - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
     - scale_install_repository_url is defined
     - scale_install_repository_url != 'existing'
+
 #
 # Configure apt repository
 #
diff --git a/roles/gui_upgrade/tasks/install_repository.yml b/roles/gui_upgrade/tasks/install_repository.yml
index c1f1fb00..d548e2d3 100644
--- a/roles/gui_upgrade/tasks/install_repository.yml
+++ b/roles/gui_upgrade/tasks/install_repository.yml
@@ -1,6 +1,13 @@
 ---
 # YUM repository installation method

+- name: initialize
+  set_fact:
+    gui_url: "gpfs_rpms/"
+
+- set_fact:
+    gui_url: ""
+  when: scale_baseurl is defined
 #
 # Configure YUM repository
 #
@@ -8,7 +15,7 @@
   yum_repository:
     name: spectrum-scale-gui
     description: IBM Spectrum Scale (GUI)
-    baseurl: "{{ scale_install_repository_url }}gpfs_rpms/"
+    baseurl: "{{ scale_install_repository_url }}{{ gui_url }}"
     gpgcheck: "{{ scale_install_gpgcheck }}"
     repo_gpgcheck: no
     sslverify: no
diff --git a/roles/hdfs_prepare/tasks/java_home.yml b/roles/hdfs_prepare/tasks/java_home.yml
index 08a11d62..5b8ac881 100644
--- a/roles/hdfs_prepare/tasks/java_home.yml
+++ b/roles/hdfs_prepare/tasks/java_home.yml
@@ -66,4 +66,51 @@
     msg: "JAVA_HOME not set properly"
   when:
     - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
-    - jvm_list.rc != 0
\ No newline at end of file
+    - jvm_list.rc != 0
+
+- name: check | Fetch hdfs extracted tar
+  set_fact:
+    hdfs_dependency_jars_dir: "hadoop-3.1.4"
+
+- name: Check and fetch gpfs.hdfs-protocol version
+  shell: "rpm -q gpfs.hdfs-protocol --qf %{VERSION}-%{RELEASE}"
+  register: gpfs_hdfs_protocol_version
+  when:
+    - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
+    - transparency_322_enabled|bool
+  ignore_errors: true
+
+- debug:
+    msg: "gpfs_hdfs_protocol_version: {{ gpfs_hdfs_protocol_version}}"
+
+- name: Check gpfs.hdfs-protocol version for standalone installation
+  fail:
+    msg: >
+      "Standalone installation of gpfs.hdfs-protocol is not supported. It can only be upgraded"
+      " from gpfs.hdfs-protocol version 3.2.2-5. For additional information, refer to the documentation available at the following link:"
+      " https://www.ibm.com/docs/en/storage-scale-bda?topic=hdfs-setup-transparency-cluster."
+  when:
+    - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
+    - transparency_322_enabled|bool
+    - gpfs_hdfs_protocol_version.rc == 0
+    - gpfs_hdfs_protocol_version.stdout_lines[0] < '3.2.2-5'
+
+- debug:
+    msg: "hdfs_dependency_jars_dir: {{ hdfs_dependency_jars_dir }}"
+
+- name: check | verify dependency jars
+  command: "ls /opt/hadoop/jars/{{ hdfs_dependency_jars_dir }}"
+  register: dep_jars
+  when:
+    - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
+    - transparency_322_enabled|bool == False
+
+- fail:
+    msg: >
+      "Dependency jars do not exist in the /opt/hadoop/jars directory; they are essential prerequisites. For further details,"
+      "please consult the documentation via the following link: https://www.ibm.com/docs/en/storage-scale-bda?topic=hdfs-setup"
+  when:
+    - ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
+    - transparency_322_enabled|bool == False
+    - dep_jars.rc != 0
+
diff --git a/roles/hdfs_upgrade/tasks/prepare_env.yml b/roles/hdfs_upgrade/tasks/prepare_env.yml
index e58acfef..ff52a973 100644
--- a/roles/hdfs_upgrade/tasks/prepare_env.yml
+++ b/roles/hdfs_upgrade/tasks/prepare_env.yml
@@ -20,7 +20,7 @@
 - name:
   set_fact:
-    transparency_33_enabled: "{{ transparency_version.stdout|bool }}"
+    transparency_33_enabled: "{{ transparency_version_33.stdout|bool }}"
   when:
     - transparency_version_33.stdout is defined
     - transparency_version_33.stdout|bool
@@ -29,7 +29,7 @@
 - name:
   set_fact:
-    transparency_322_enabled: "{{ transparency_version.stdout|bool }}"
+    transparency_322_enabled: "{{ transparency_version_322.stdout|bool }}"
   when:
     - transparency_version_322.stdout is defined
     - transparency_version_322.stdout|bool
diff --git a/roles/perfmon_upgrade/tasks/install_repository.yml b/roles/perfmon_upgrade/tasks/install_repository.yml
index b10f1124..e47f6cb5 100644
--- a/roles/perfmon_upgrade/tasks/install_repository.yml
+++ b/roles/perfmon_upgrade/tasks/install_repository.yml
@@ -79,6 +79,10 @@
     - ansible_distribution in scale_ubuntu_distribution
     - scale_version >= "5.1.4.0"

+- set_fact:
+    scale_zimon_url: ""
+  when: scale_baseurl is defined
+
 - name: upgrade | Configure ZIMon YUM repository
   yum_repository:
     name: spectrum-scale-zimon
diff --git a/roles/remotemount_configure/defaults/main.yml b/roles/remotemount_configure/defaults/main.yml
index 75c0da42..109ae4b8 100644
--- a/roles/remotemount_configure/defaults/main.yml
+++ b/roles/remotemount_configure/defaults/main.yml
@@ -16,6 +16,8 @@ scale_remotemount_endpoint: "{{ scale_remotemount_scalemgmt_endpoint }}/remotemo

 scale_remotemount_validate_certs_uri: 'no'

+scale_cluster_authorization_cleanup: true
+
 # Temporary Storage for Public Key, Only used when debuging
 scale_remotemount_client_access_key: /tmp/client_cluster.pub

@@ -87,4 +89,4 @@ scale_remotemount_storage_contactnodes_filter: '?fields=roles.gatewayNode%2Cnetw
 # "managerNode": false,
 # "otherNodeRoles": "perfmonNode",
 # "quorumNode": true,
-# "snmpNode": false
\ No newline at end of file
+# "snmpNode": false
diff --git a/roles/remotemount_configure/tasks/cleanup_remote_mount.yml b/roles/remotemount_configure/tasks/cleanup_remote_mount.yml
index 3275549c..f792d56b 100644
--- a/roles/remotemount_configure/tasks/cleanup_remote_mount.yml
+++ b/roles/remotemount_configure/tasks/cleanup_remote_mount.yml
@@ -88,7 +88,9 @@
 # We want to delete the one where the owningCluster name matches what we are trying to do a remote mount on
 - name: Cleanup | Client Cluster (access) | Delete the Remote Mount/clusters connection on a loop.
   include_tasks: delete_remote_cluster.yml
-  when: item.owningCluster == owning_cluster_name
+  when:
+    - item.owningCluster == owning_cluster_name
+    - scale_cluster_authorization_cleanup is defined and scale_cluster_authorization_cleanup | bool
   loop: "{{ remote_clusters_result.json.owningClusters }}"
   run_once: True

@@ -139,7 +141,9 @@
   retries: "{{ scale_remotemount_restapi_retries_count }}"
   delay: "{{ scale_remotemount_restapi_retries_delay }}"
   #when: not remote_clusters_results.failed
-  when: remote_clusters_results.json.status.code == 200
+  when:
+    - remote_clusters_results.json.status.code == 200
+    - scale_cluster_authorization_cleanup is defined and scale_cluster_authorization_cleanup | bool
   run_once: True

 - name: "Cleanup | Storage Cluster (owner) | Output from delete the Client Cluster, ('{{ access_cluster_name }}')"
   debug:
     msg: "The is no Client/Accessing cluster named: ({{ access_cluster_name }}) - Message from RestAPI: {{ remote_clusters_results.json.status.message }}"
   when:
-    - remote_clusters_results.json.status.code == 400
\ No newline at end of file
+    - remote_clusters_results.json.status.code == 400
+    - scale_cluster_authorization_cleanup is defined and scale_cluster_authorization_cleanup | bool
diff --git a/roles/remotemount_configure/tasks/remotecluster.yml b/roles/remotemount_configure/tasks/remotecluster.yml
index 99e3e03e..92af1ab7 100644
--- a/roles/remotemount_configure/tasks/remotecluster.yml
+++ b/roles/remotemount_configure/tasks/remotecluster.yml
@@ -288,7 +288,7 @@
     owning_nodes_name: "{{ owning_nodes_name + [item.adminNodeName] }}"
   with_items: "{{ owning_cluster_nodes.json.nodes }}"
   run_once: True
-  
+
 #
 # This Section is when using daemonNodeName
 #
@@ -427,19 +427,21 @@
   ignore_errors: true
   run_once: True

-
 - name: "Mount Filesystem | Storage Cluster (owner) | Check if filesystems is accessible on Client Cluster ('{{ access_cluster_name }}') - Debug"
   run_once: True
   debug:
-    msg: "{{ remote_clusters_results.json.remoteClusters[0] | json_query('owningClusterFilesystems[*].filesystem') | join(', ') }}"
-  when: scale_remotemount_debug | bool
+    msg: "{{ remote_clusters_results.json.remoteClusters[0].owningClusterFilesystems | map(attribute='filesystem') | list | join(', ') }}"
+  when:
+    - scale_remotemount_debug | bool
+    - remote_clusters_results.json.remoteClusters[0].owningClusterFilesystems is defined

 # Set current filesystem access and run only the access task on filesystem, this is a list
 # this could also be changed to a import_task with loop.
 - set_fact:
-    current_scale_remotemount_storage_filesystem_name: "{{ remote_clusters_results.json.remoteClusters[0] | json_query('owningClusterFilesystems[*].filesystem') | join(', ') }}"
+    current_scale_remotemount_storage_filesystem_name: "{{ remote_clusters_results.json.remoteClusters[0].owningClusterFilesystems | map(attribute='filesystem') | list | join(', ') }}"
   run_once: True
+  when: remote_clusters_results.json.remoteClusters[0].owningClusterFilesystems is defined

 - name: Step 6 - Configure and Mount filesystems
   debug:
diff --git a/roles/s3_configure/defaults/main.yml b/roles/s3_configure/defaults/main.yml
new file mode 100644
index 00000000..98072898
--- /dev/null
+++ b/roles/s3_configure/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Default variables for the IBM Spectrum Scale (S3) cluster role -
+# either edit this file or define your own variables to override the defaults
diff --git a/roles/s3_configure/meta/main.yml b/roles/s3_configure/meta/main.yml
new file mode 100644
index 00000000..597e8c86
--- /dev/null
+++ b/roles/s3_configure/meta/main.yml
@@ -0,0 +1,21 @@
+---
+galaxy_info:
+  author: IBM Corporation
+  description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS)
+  company: IBM
+
+  license: Apache-2.0
+
+  min_ansible_version: 2.9
+
+  platforms:
+    - name: EL
+      versions:
+        - 7
+        - 8
+
+  galaxy_tags: []
+
+dependencies:
+  - ibm.spectrum_scale.s3_prepare
+  - ibm.spectrum_scale.ces_common
diff --git a/roles/s3_configure/tasks/configure.yml b/roles/s3_configure/tasks/configure.yml
new file mode 100644
index 00000000..28cb6e55
--- /dev/null
+++ b/roles/s3_configure/tasks/configure.yml
@@ -0,0 +1,6 @@
+---
+- name: configure | Enable S3
+  shell:
+    cmd: "{{ scale_command_path }}mmces service enable S3"
+  delegate_to: "{{ scale_s3_node_list.0 }}"
+  run_once: true
diff --git a/roles/s3_configure/tasks/main.yml b/roles/s3_configure/tasks/main.yml
new file mode 100644
index 00000000..c5456b08
--- /dev/null
+++ b/roles/s3_configure/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# tasks file for cluster
+- import_tasks: configure.yml
+  when: scale_protocols is defined and scale_protocols.s3|bool
+  tags: config
diff --git a/roles/s3_configure/vars/main.yml b/roles/s3_configure/vars/main.yml
new file mode 100644
index 00000000..bf902af2
--- /dev/null
+++ b/roles/s3_configure/vars/main.yml
@@ -0,0 +1,6 @@
+---
+# Variables for the IBM Spectrum Scale (S3) role -
+# these variables are *not* meant to be overridden
+
+# default mm command execution path
+scale_command_path: /usr/lpp/mmfs/bin/
diff --git a/roles/s3_install/defaults/main.yml b/roles/s3_install/defaults/main.yml
new file mode 100644
index 00000000..a694d569
--- /dev/null
+++ b/roles/s3_install/defaults/main.yml
@@ -0,0 +1,20 @@
+---
+# Default variables for the IBM Spectrum Scale (S3) role -
+# either edit this file or define your own variables to override the defaults
+
+## Specify the URL of the (existing) Spectrum Scale YUM/apt/zypper repository
+#scale_install_s3_repository_rpms: http:///s3_rpms/
+#scale_install_s3_repository_debs: http:///s3_debs/
+#scale_install_s3_repository_rpms_sles: http:///s3_rpms/sles12/
+
+## List of S3 packages to install
+scale_s3_packages:
+- noobaa-core
+- gpfs.mms3
+
+## Temporary directory to copy installation package to
+## (local package installation method)
+scale_install_localpkg_tmpdir_path: /tmp
+
+## Flag to install s3 debug package
+scale_s3_install_debuginfo: true
diff --git a/roles/s3_install/handlers/main.yml b/roles/s3_install/handlers/main.yml
new file mode 100644
index 00000000..2e896124
--- /dev/null
+++ b/roles/s3_install/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+# handlers file for node
+- name: yum-clean-metadata
+  command: yum clean metadata
diff --git a/roles/s3_install/meta/main.yml b/roles/s3_install/meta/main.yml
new file mode 100644
index 00000000..fb0757e1
--- /dev/null
+++ b/roles/s3_install/meta/main.yml
@@ -0,0 +1,21 @@
+---
+galaxy_info:
+  author: IBM Corporation
+  description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS)
+  company: IBM
+
+  license: Apache-2.0
+
+  min_ansible_version: 2.9
+
+  platforms:
+    - name: EL
+      versions:
+        - 7
+        - 8
+
+  galaxy_tags: []
+
+dependencies:
+  - ibm.spectrum_scale.core_common
+  - ibm.spectrum_scale.s3_prepare
diff --git a/roles/s3_install/tasks/apt/install.yml b/roles/s3_install/tasks/apt/install.yml
new file mode 100644
index 00000000..ed779e23
--- /dev/null
+++ b/roles/s3_install/tasks/apt/install.yml
@@ -0,0 +1,15 @@
+---
+- name: Install GPFS s3 packages
+  package:
+    name: "{{ scale_install_all_packages }}"
+    state: present
+  when: scale_install_repository_url is defined and inventory_hostname in scale_s3_node_list
+
+
+- name: install | Install GPFS S3 deb
+  apt:
+    deb: "{{ item }}"
+    state: present
+  when: scale_install_repository_url is not defined and inventory_hostname in scale_s3_node_list
+  with_items:
+    - "{{ scale_install_all_packages }}"
diff --git a/roles/s3_install/tasks/install.yml b/roles/s3_install/tasks/install.yml
new file mode 100644
index 00000000..1f620840
--- /dev/null
+++ b/roles/s3_install/tasks/install.yml
@@ -0,0 +1,69 @@
+---
+# Install or update RPMs
+# Ensure that installation method was chosen during previous role
+- block:
+    - name: install | Check for repository installation method
+      set_fact:
+        scale_installmethod: repository
+      when:
+        - scale_install_repository_url is defined
+
+    - name: install | Check for localpkg installation method
+      set_fact:
+        scale_installmethod: local_pkg
+      when:
+        - scale_install_repository_url is undefined
+        - scale_install_remotepkg_path is undefined
+        - scale_install_localpkg_path is defined
+
+    - name: install | Check for remotepkg installation method
+      set_fact:
+        scale_installmethod: remote_pkg
+      when:
+        - scale_install_repository_url is undefined
+        - scale_install_remotepkg_path is defined
+
+    - name: install | Check for directory package installation method
+      set_fact:
+        scale_installmethod: dir_pkg
+      when:
+        - scale_install_repository_url is undefined
+        - scale_install_remotepkg_path is undefined
+        - scale_install_localpkg_path is undefined
+        - scale_install_directory_pkg_path is defined
+
+    - name: install | Check installation method
+      assert:
+        that: scale_installmethod is defined
+        msg: >-
+          Please set the appropriate variable 'scale_install_*' for your desired
+          installation method!
+  run_once: true
+  delegate_to: localhost
+
+# Run chosen installation method to get list of RPMs
+
+- name: install | Initialize list of packages
+  set_fact:
+    scale_install_all_packages: []
+
+- name: install | Set the extracted package directory path
+  set_fact:
+    s3_extracted_path: "{{ scale_extracted_path }}"
+
+- name: install | Stat extracted packages directory
+  stat:
+    path: "{{ s3_extracted_path }}"
+  register: scale_extracted_gpfs_dir
+
+- include_tasks: install_{{ scale_installmethod }}.yml
+
+- import_tasks: apt/install.yml
+  when: ansible_distribution in scale_ubuntu_distribution
+
+- import_tasks: yum/install.yml
+  when: ansible_distribution in scale_rhel_distribution
+
+- import_tasks: zypper/install.yml
+  when: ansible_distribution in scale_sles_distribution
+
diff --git a/roles/s3_install/tasks/install_dir_pkg.yml b/roles/s3_install/tasks/install_dir_pkg.yml
new file mode 100644
index 00000000..0dc1730a
--- /dev/null
+++ b/roles/s3_install/tasks/install_dir_pkg.yml
@@ -0,0 +1,77 @@
+---
+# Dir package installation method
+
+- block:  ## run_once: true
+    - name: install | Stat directory installation package
+      stat:
+        path: "{{ scale_install_directory_pkg_path }}"
+      register: scale_install_dirpkg
+
+    - name: install | Check directory installation package
+      assert:
+        that: scale_install_dirpkg.stat.exists
+        msg: >-
+          Please set the variable 'scale_install_directory_pkg_path' to point to the
+          local installation package (accessible on Ansible control machine)!
+  run_once: true
+  delegate_to: localhost
+
+- name: install | Create default directory
+  file:
+    path: "{{ scale_extracted_path }}"
+    state: directory
+    mode: a+x
+    recurse: yes
+
+- name: install | Stat extracted packages
+  stat:
+    path: "{{ scale_extracted_path + '/' + scale_install_directory_pkg_path | basename }}"
+  register: scale_install_gpfs_packagedir
+
+#
+# Copy installation directory package to default
+#
+- block:
+    - name: install | Copy installation package to node
+      copy:
+        src: "{{ scale_install_directory_pkg_path }}"
+        dest: "{{ scale_extracted_path }}"
+        mode: a+x
+
+- name: install | Set installation package path
+  set_fact:
+    dir_path: "{{ scale_extracted_path + '/' + scale_install_directory_pkg_path | basename }}"
+
+- name: install | gpfs base path
+  set_fact:
+    gpfs_path_url: "{{ dir_path }}"
+  when: scale_install_directory_pkg_path is defined
+
+#
+# Find noobaa-core
+#
+#
+- block:  ## when: host is defined as a protocol node
+
+    - name: install | Find noobaa-core (noobaa-core) package
+      find:
+        paths: "{{ gpfs_path_url }}"
+        patterns: noobaa-core*
+      register: scale_install_gpfs_s3
+
+    - name: install | Check valid GPFS (s3) package
+      assert:
+        that: scale_install_gpfs_s3.matched > 0
+        msg: "No S3 (noobaa-core) package found {{ gpfs_path_url }}noobaa-core*"
+
+    - name: install | Add GPFS s3 package to list
+      vars:
+        current_package: "{{ item.path }}"
+      set_fact:
+        scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}"
+      with_items:
+        - "{{ scale_install_gpfs_s3.files }}"
+
+- debug:
+    msg: "{{ scale_install_all_packages }}"
diff --git a/roles/s3_install/tasks/install_local_pkg.yml b/roles/s3_install/tasks/install_local_pkg.yml
new file mode 100644
index 00000000..27606923
--- /dev/null
+++ b/roles/s3_install/tasks/install_local_pkg.yml
@@ -0,0 +1,137 @@
+---
+# Local package installation method
+- block:  ## run_once: true
+    - name: install | Stat local installation package
+      stat:
+        path: "{{ scale_install_localpkg_path }}"
+        checksum_algorithm: md5
+      register: scale_install_localpkg
+
+    - name: install | Check local installation package
+      assert:
+        that: scale_install_localpkg.stat.exists
+        msg: >-
+          Please set the variable 'scale_install_localpkg_path' to point to the
+          local installation package (accessible on Ansible control machine)!
+
+#
+# Optionally, verify package checksum
+#
+    - name: install | Stat checksum file
+      stat:
+        path: "{{ scale_install_localpkg_path }}.md5"
+      register: scale_install_md5_file
+
+    - block:  ## when: scale_install_md5_file.stat.exists
+        - name: install | Read checksum from file
+          set_fact:
+            scale_install_md5_sum: "{{ lookup('file', scale_install_localpkg_path + '.md5') }}"
+
+        - name: install | Compare checksums
+          assert:
+            that: scale_install_md5_sum.strip().split().0 == scale_install_localpkg.stat.checksum
+            msg: >-
+              Checksums don't match. Please check integrity of your local
+              installation package!
+
+      when: scale_install_md5_file.stat.exists
+  run_once: true
+  delegate_to: localhost
+
+#
+# Copy installation package
+#
+- name: install | Stat extracted packages
+  stat:
+    path: "{{ s3_extracted_path }}"
+  register: scale_install_gpfs_rpmdir
+
+- block:  ## when: not scale_install_gpfs_rpmdir.stat.exists
+    - name: install | Stat temporary directory
+      stat:
+        path: "{{ scale_install_localpkg_tmpdir_path }}"
+      register: scale_install_localpkg_tmpdir
+
+    - name: install | Check temporary directory
+      assert:
+        that:
+          - scale_install_localpkg_tmpdir.stat.exists
+          - scale_install_localpkg_tmpdir.stat.isdir
+        msg: >-
+          Please set the variable 'scale_install_localpkg_tmpdir_path' to point
+          to a temporary directory on the remote system!
+
+    - name: install | Copy installation package to node
+      copy:
+        src: "{{ scale_install_localpkg_path }}"
+        dest: "{{ scale_install_localpkg_tmpdir_path }}"
+        mode: a+x
+  when: not scale_install_gpfs_rpmdir.stat.exists
+
+#
+# Extract installation package
+#
+- name: install | Extract installation package
+  vars:
+    localpkg: "{{ scale_install_localpkg_tmpdir_path + '/' + scale_install_localpkg_path | basename }}"
+  command: "{{ localpkg + ' --silent' }}"
+  args:
+    creates: "{{ s3_extracted_path }}"
+
+- name: install | Stat extracted packages
+  stat:
+    path: "{{ s3_extracted_path }}"
+  register: scale_install_gpfs_rpmdir
+
+- name: install | Check extracted packages
+  assert:
+    that:
+      - scale_install_gpfs_rpmdir.stat.exists
+      - scale_install_gpfs_rpmdir.stat.isdir
+    msg: >-
+      The variable 'scale_version' doesn't seem to match the contents of the
+      local installation package!
+
+# Delete installation package
+- name: install | Delete installation package from node
+  file:
+    path: "{{ scale_install_localpkg_tmpdir_path + '/' + scale_install_localpkg_path | basename }}"
+    state: absent
+
+- name: install | s3 path
+  set_fact:
+    scale_s3_url: 's3_rpms/rhel8/'
+  when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8'
+
+- name: install | s3 path
+  set_fact:
+    scale_s3_url: 's3_rpms/rhel9/'
+  when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9'
+
+# Find s3 rpms
+- block:  ## when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution
+
+    - name: install | Find noobaa-core (noobaa-core) package
+      find:
+        paths: "{{ s3_extracted_path }}/{{ scale_s3_url }}"
+        patterns: noobaa-core*
+      register: scale_install_gpfs_s3
+
+    - name: install | Check valid (noobaa-core) package
+      assert:
+        that: scale_install_gpfs_s3.matched > 0
+        msg: "No noobaa-core (noobaa-core) package found {{ s3_extracted_path }}/{{ scale_s3_url }}noobaa-core*"
+
+    - name: install | Add noobaa-core package to list
+      vars:
+        current_package: "{{ item.path }}"
+      set_fact:
+        scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}"
+      with_items:
+        - "{{ scale_install_gpfs_s3.files }}"
+
+  when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution
+
+
+- debug:
+    msg: "{{ scale_install_all_packages }}"
diff --git a/roles/s3_install/tasks/install_remote_pkg.yml b/roles/s3_install/tasks/install_remote_pkg.yml
new file mode 100644
index 00000000..56227dc2
--- /dev/null
+++ b/roles/s3_install/tasks/install_remote_pkg.yml
@@ -0,0 +1,109 @@
+---
+# Remote package installation method
+
+- name: install | Stat remote installation package
+  stat:
+    path: "{{ scale_install_remotepkg_path }}"
+    checksum_algorithm: md5
+  register: scale_install_remotepkg
+
+- name: install | Check remote installation package
+  assert:
+    that: scale_install_remotepkg.stat.exists
+    msg: >-
+      Please set the variable 'scale_install_remotepkg_path' to point to the
+      remote installation package (accessible on Ansible managed node)!
+
+#
+# Optionally, verify package checksum
+#
+- name: install | Stat checksum file
+  stat:
+    path: "{{ scale_install_remotepkg_path }}.md5"
+  register: scale_install_md5_file
+
+- block:  ## when: scale_install_md5_file.stat.exists
+    - name: install | Read checksum from file
+      slurp:
+        src: "{{ scale_install_remotepkg_path }}.md5"
+      register: scale_install_md5_sum
+
+    - name: install | Compare checksums
+      vars:
+        md5sum: "{{ scale_install_md5_sum.content | b64decode }}"
+      assert:
+        that: md5sum.strip().split().0 == scale_install_remotepkg.stat.checksum
+        msg: >-
+          Checksums don't match. Please check integrity of your remote
+          installation package!
+  when: scale_install_md5_file.stat.exists
+
+#
+# Extract installation package
+#
+- name: install | Stat extracted packages
+  stat:
+    path: "{{ s3_extracted_path }}"
+  register: scale_install_gpfs_rpmdir
+
+- name: install | Make installation package executable
+  file:
+    path: "{{ scale_install_remotepkg_path }}"
+    mode: a+x
+  when: not scale_install_gpfs_rpmdir.stat.exists
+
+- name: install | Extract installation package
+  command: "{{ scale_install_remotepkg_path + ' --silent' }}"
+  args:
+    creates: "{{ s3_extracted_path }}"
+
+- name: install | Stat extracted packages
+  stat:
+    path: "{{ s3_extracted_path }}"
+  register: scale_install_gpfs_rpmdir
+
+- name: install | Check extracted packages
+  assert:
+    that:
+      - scale_install_gpfs_rpmdir.stat.exists
+      - scale_install_gpfs_rpmdir.stat.isdir
+    msg: >-
+      The variable 'scale_version' doesn't seem to match the contents of the
+      remote installation package!
+
+- name: install | s3 path
+  set_fact:
+    scale_s3_url: 's3_rpms/rhel8/'
+  when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8'
+
+- name: install | s3 path
+  set_fact:
+    scale_s3_url: 's3_rpms/rhel9/'
+  when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9'
+
+# Find s3 rpms
+- block:  ## when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution
+
+    - name: install | Find noobaa-core (noobaa-core) package
+      find:
+        paths: "{{ s3_extracted_path }}/{{ scale_s3_url }}"
+        patterns: noobaa-core*
+      register: scale_install_gpfs_s3
+
+    - name: install | Check valid noobaa-core (noobaa-core) package
+      assert:
+        that: scale_install_gpfs_s3.matched > 0
+        msg: "No S3 (noobaa-core) package found {{ s3_extracted_path }}/{{ scale_s3_url }}noobaa-core*"
+
+    - name: install | Add GPFS s3 package to list
+      vars:
+        current_package: "{{ item.path }}"
+      set_fact:
+        scale_install_all_packages: "{{ scale_install_all_packages + [ current_package ] }}"
+      with_items:
+        - "{{ scale_install_gpfs_s3.files }}"
+
+  when: ansible_distribution in scale_rhel_distribution or ansible_distribution in scale_sles_distribution
+
+- debug:
+    msg: "{{ scale_install_all_packages }}"
diff --git a/roles/s3_install/tasks/install_repository.yml b/roles/s3_install/tasks/install_repository.yml
new file mode 100644
index 00000000..9174e910
--- /dev/null
+++ b/roles/s3_install/tasks/install_repository.yml
@@ -0,0 +1,31 @@
+---
+- name: install | s3 path
+  set_fact:
+    scale_s3_url: 's3_rpms/rhel8/'
+  when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '8'
+
+- name: install | s3 path
+  set_fact:
+    scale_s3_url: 's3_rpms/rhel9/'
+  when: ansible_distribution in scale_rhel_distribution and ansible_distribution_major_version == '9'
+
+- name: install | Configure s3 YUM repository
+  yum_repository:
+    name: spectrum-scale-s3
+    description: IBM Spectrum Scale (s3)
+    baseurl: "{{ scale_install_repository_url }}{{ scale_s3_url }}"
+    gpgcheck: "{{ scale_install_gpgcheck }}"
+    repo_gpgcheck: no
+    sslverify: no
+    state: present
+  notify: yum-clean-metadata
+  when:
+    - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
+    - scale_install_repository_url is defined
+    - scale_install_repository_url != 'existing'
+
+- name: install | Add GPFS s3 packages to list
+  set_fact:
+    scale_install_all_packages: "{{ scale_install_all_packages + [ item ] }}"
+  with_items:
+    - "{{ scale_s3_packages }}"
diff --git a/roles/s3_install/tasks/main.yml b/roles/s3_install/tasks/main.yml
new file mode 100644
index 00000000..c5302963
--- /dev/null
+++ b/roles/s3_install/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# Install IBM Spectrum Scale (S3)
+- import_tasks: install.yml
+  when: scale_protocols is defined and (scale_protocols.s3|bool)
+  tags: install
diff --git a/roles/s3_install/tasks/yum/install.yml b/roles/s3_install/tasks/yum/install.yml
new file mode 100644
index 00000000..a86a4efd
--- /dev/null
+++ b/roles/s3_install/tasks/yum/install.yml
@@ -0,0 +1,7 @@
+---
+- name: install | Install GPFS S3 packages
+  yum:
+    name: "{{ scale_install_all_packages }}"
+    state: present
+    disable_gpg_check: "{{ scale_disable_gpgcheck }}"
+  when: inventory_hostname in scale_s3_node_list
diff --git a/roles/s3_install/tasks/zypper/install.yml b/roles/s3_install/tasks/zypper/install.yml
new file mode 100644
index 00000000..b7e45d77
--- /dev/null
+++ b/roles/s3_install/tasks/zypper/install.yml
@@ -0,0 +1,7 @@
+---
+- name: install | Install GPFS S3 packages
+  zypper:
+    name: "{{ scale_install_all_packages }}"
+    state: present
+    disable_gpg_check: no
+  when: inventory_hostname in scale_s3_node_list
diff --git a/roles/s3_install/vars/main.yml b/roles/s3_install/vars/main.yml
new file mode 100644
index 00000000..5a6e9c01
--- /dev/null
+++ b/roles/s3_install/vars/main.yml
@@ -0,0 +1,10 @@
+---
+# Variables for the IBM Spectrum Scale (GPFS) role -
+# these variables are *not* meant to be overridden
+
+## Compute RPM version from Spectrum Scale version
+scale_rpmversion: "{{ scale_version | regex_replace('^([0-9.]+)\\.([0-9])$', '\\1-\\2') }}"
+
+## Default scale extraction path
+scale_extracted_default_path: "/usr/lpp/mmfs"
+scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}"
diff --git a/roles/s3_prepare/meta/main.yml b/roles/s3_prepare/meta/main.yml
new file mode 100644
index 00000000..dab8063f
--- /dev/null
+++ b/roles/s3_prepare/meta/main.yml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+  author: IBM Corporation
+  description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS)
+  company: IBM
+
+  license: Apache-2.0
+
+  min_ansible_version: 2.9
+
+  platforms:
+    - name: EL
+      versions:
+        - 7
+        - 8
+
+  galaxy_tags: []
+
+dependencies: []
diff --git a/roles/s3_prepare/tasks/check.yml b/roles/s3_prepare/tasks/check.yml
new file mode 100644
index 00000000..684a1032
--- /dev/null
+++ b/roles/s3_prepare/tasks/check.yml
@@ -0,0 +1,22 @@
+---
+- name: check | Initialize
+  set_fact:
+    scale_s3_node_list: []
+
+- name: check | Collect all s3 nodes
+  set_fact:
+    scale_s3_node_list: "{{ scale_s3_node_list + [hostvars[item]['inventory_hostname']] }}"
+  when: hostvars[item]['scale_protocol_node'] is defined and hostvars[item]['scale_protocol_node']|bool
+  with_items:
+    - "{{ ansible_play_hosts }}"
+  delegate_to: localhost
+  run_once: true
+
+- name: check | Check if at least one s3 node is configured
+  assert:
+    that:
+      - scale_s3_node_list|length > 0
+    fail_msg: "No s3 nodes configured"
+
+- debug:
+    msg: "S3 precheck ok"
diff --git a/roles/s3_prepare/tasks/main.yml b/roles/s3_prepare/tasks/main.yml
new file mode 100644
index 00000000..2a3d4555
--- /dev/null
+++ b/roles/s3_prepare/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# tasks file for precheck
+- import_tasks: check.yml
+  when: scale_protocols is defined and (scale_protocols.s3|bool)
+  tags: prepare
diff --git a/roles/s3_verify/defaults/main.yml b/roles/s3_verify/defaults/main.yml
new file mode 100644
index 00000000..344228e7
--- /dev/null
+++ b/roles/s3_verify/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Default variables for the IBM Spectrum Scale (S3) postcheck role -
+# either edit this file or define your own variables to override the defaults
diff --git a/roles/s3_verify/meta/main.yml b/roles/s3_verify/meta/main.yml
new file mode 100644
index 00000000..dab8063f
--- /dev/null
+++ b/roles/s3_verify/meta/main.yml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+  author: IBM Corporation
+  description: Highly-customizable Ansible role for installing and configuring IBM Spectrum Scale (GPFS)
+  company: IBM
+
+  license: Apache-2.0
+
+  min_ansible_version: 2.9
+
+  platforms:
+    - name: EL
+      versions:
+        - 7
+        - 8
+
+  galaxy_tags: []
+
+dependencies: []
diff --git a/roles/s3_verify/tasks/check.yml b/roles/s3_verify/tasks/check.yml
new file mode 100644
index 00000000..7efc08f3
--- /dev/null
+++ b/roles/s3_verify/tasks/check.yml
@@ -0,0 +1,14 @@
+---
+- name: postcheck | Check if S3 is running
+  shell:
+    cmd: "{{ scale_command_path }}mmces service list|grep S3"
+  register: scale_s3_status
+  when: inventory_hostname in scale_s3_node_list
+  failed_when: false
+
+- name: postcheck | Check if s3 is running
+  assert:
+    that:
+      - scale_s3_status.rc == 0
+    fail_msg: "S3 is not active on {{ ansible_hostname }}"
+  when: inventory_hostname in scale_s3_node_list
diff --git a/roles/s3_verify/tasks/main.yml b/roles/s3_verify/tasks/main.yml
new file mode 100644
index 00000000..3b56eae8
--- /dev/null
+++ b/roles/s3_verify/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# tasks file for postcheck
+- import_tasks: check.yml
+  when: protocols is defined and protocols.s3|bool
+  tags: always
diff --git a/roles/s3_verify/vars/main.yml b/roles/s3_verify/vars/main.yml
new file mode 100644
index 00000000..bf902af2
--- /dev/null
+++ b/roles/s3_verify/vars/main.yml
@@ -0,0 +1,6 @@
+---
+# Variables for the IBM Spectrum Scale (S3) role -
+# these variables are *not* meant to be overridden
+
+# default mm command execution path
+scale_command_path: /usr/lpp/mmfs/bin/
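
Usage note (not part of the diff): the change set introduces several new toggles — the s3 flag under scale_protocols consumed by ces_common and the new s3_* roles, the scale_afm_cos_config_params.is_gcs switch that adds "--gcs" to mmafmcosconfig, the scale_cluster_authorization_cleanup guard in remotemount_configure, and the per-host scale_cluster_gateway flag used to build the scale_gateway_nodes group. A minimal sketch of how these variables could be set in group_vars/host_vars is shown below; the values are illustrative assumptions, not defaults shipped with the roles.

---
# group_vars sketch (values are assumptions, adjust per cluster)
scale_protocols:
  s3: true          # routes ces_common through s3_prepare / s3_install / s3_configure
  nfs: false
  smb: false
  object: false
  hdfs: false

# AFM-to-COS: append "--gcs" to mmafmcosconfig when the target is Google Cloud Storage
scale_afm_cos_config_params:
  is_gcs: false

# Set to false to skip deleting the remote-mount cluster authorization during cleanup
scale_cluster_authorization_cleanup: true

# host_vars sketch for the designated AFM gateway node; afm_gateway.yml only adds a
# host to scale_gateway_nodes when this flag is defined and true
scale_cluster_gateway: true

Note also that mmafmcosconfig is now always invoked with "--directory-object" in addition to any flags accumulated in extra_option_flag.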