From d57cce6a3b479df1ef278837fa044e1ea30b68a9 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Thu, 26 Sep 2024 15:53:36 +0300 Subject: [PATCH 1/9] Correct multiple spelling errors to improve readability --- CHANGELOG.md | 4 +- CHANGELOG.rst | 2 +- CONTRIBUTING.md | 4 +- README.md | 402 +++--- changelogs/changelog.yaml | 810 ++++++------ examples/acp.yml | 5 +- examples/fc/fc.yml | 2 +- .../node_discovery_network_info.yml | 32 +- examples/images.yml | 2 +- examples/karbon/create_registries.yml | 2 +- examples/karbon/registries_info.yml | 2 +- examples/ndb/README.md | 2 +- examples/ndb/db_server_vms.yml | 17 +- ...rovision_postgres_ha_instance_with_ips.yml | 4 +- examples/roles_crud.yml | 2 +- examples/vm.yml | 102 +- examples/vm_update.yml | 2 +- plugins/doc_fragments/ntnx_operations.py | 4 +- .../ndb/database_engines/database_engine.py | 2 +- plugins/module_utils/prism/static_routes.py | 2 +- plugins/modules/ntnx_acps.py | 4 +- plugins/modules/ntnx_ndb_clusters.py | 2 +- plugins/modules/ntnx_ndb_maintenance_tasks.py | 4 +- .../modules/ntnx_ndb_maintenance_window.py | 12 +- .../ntnx_ndb_maintenance_windows_info.py | 2 +- plugins/modules/ntnx_ndb_profiles.py | 2 +- plugins/modules/ntnx_ndb_register_database.py | 2 +- plugins/modules/ntnx_protection_rules.py | 2 +- plugins/modules/ntnx_recovery_plans.py | 2 +- plugins/modules/ntnx_service_groups.py | 4 +- plugins/modules/ntnx_static_routes.py | 2 +- plugins/modules/ntnx_vms.py | 26 +- plugins/modules/ntnx_vpcs.py | 4 +- .../targets/ntnx_acps/tasks/create_acps.yml | 11 +- .../targets/ntnx_acps/tasks/delete_acp.yml | 15 +- .../ntnx_address_groups/tasks/create.yml | 4 +- .../ntnx_address_groups/tasks/delete.yml | 3 +- .../ntnx_address_groups/tasks/update.yml | 9 +- .../ntnx_categories/tasks/all_operations.yml | 2 +- .../ntnx_foundation/tasks/image_nodes.yml | 127 +- .../tasks/negative_scenarios.yml | 226 ++-- .../tasks/get_aos.yml | 2 +- .../tasks/configure_ipmi.yml | 2 +- .../tasks/image_nodes.yml | 6 +- 
.../tasks/create_key.yml | 13 +- .../tasks/key_info.yml | 16 +- .../tasks/get_cluster_info.yml | 10 +- .../tasks/get_node_info.yml | 9 +- .../tasks/discover_nodes.yml | 7 +- .../tasks/get_hypervisors.yml | 2 +- .../tasks/negative_scenarios.yml | 6 +- .../tasks/upload.yml | 4 +- .../tasks/get_info.yml | 2 +- .../tasks/image_nodes.yml | 406 +++--- .../tasks/update.yml | 2 +- .../tasks/crud.yml | 2 +- .../tasks/negative_scenarios.yml | 24 +- .../ntnx_karbon_registries/tasks/create.yml | 8 +- .../tasks/negativ_scenarios.yml | 4 +- .../tasks/tests.yml | 2 +- .../targets/ntnx_ndb_clusters/tasks/CRUD.yml | 191 ++- .../ntnx_ndb_database_clones/tasks/clones.yml | 187 ++- .../tasks/all_actions.yml | 250 ++-- .../ntnx_ndb_databases_sanity/tasks/tests.yml | 178 +-- .../tasks/tests.yml | 318 ++--- .../tasks/tests.yml | 112 +- .../ntnx_ndb_db_server_vms/tasks/crud.yml | 332 +++-- .../ntnx_ndb_maintenance_windows/readme.md | 5 +- .../tasks/crud.yml | 54 +- .../ntnx_ndb_profiles/tasks/compute.yml | 2 +- .../ntnx_ndb_profiles/tasks/db_params.yml | 12 +- .../tasks/network_profile.yml | 5 +- .../ntnx_ndb_software_profiles/tasks/crud.yml | 128 +- .../ntnx_ndb_vlans/tasks/create_vlans.yml | 4 +- .../tasks/negativ_scenarios.yml | 27 +- .../targets/ntnx_ova/tasks/create_ova.yml | 44 +- .../ntnx_projects/tasks/create_project.yml | 2 +- .../tasks/projects_with_role_mappings.yml | 69 +- .../ntnx_projects/tasks/update_project.yml | 3 +- .../tasks/protection_rules.yml | 2 +- .../tasks/crud.yml | 571 ++++----- .../targets/ntnx_roles/tasks/create.yml | 4 +- .../targets/ntnx_roles/tasks/delete.yml | 2 +- .../targets/ntnx_roles/tasks/update.yml | 5 +- .../ntnx_security_rules/tasks/app_rule.yml | 2 +- .../tasks/isolation_rule.yml | 50 +- .../tasks/get_security_rules.yml | 26 +- .../ntnx_service_groups/tasks/create.yml | 24 +- .../ntnx_service_groups/tasks/update.yml | 7 +- .../ntnx_static_routes/tasks/create.yml | 8 +- .../ntnx_static_routes_info/tasks/info.yml | 9 +- 
.../targets/ntnx_user_groups/tasks/create.yml | 2 +- .../targets/ntnx_users/tasks/create.yml | 2 +- .../targets/ntnx_vms_clone/tasks/create.yml | 114 +- .../tasks/list_floating_ips.yml | 6 +- .../tasks/negative_scenarios.yml | 152 +-- .../targets/nutanix_vms/tasks/create.yml | 1106 ++++++++--------- .../targets/nutanix_vms/tasks/delete.yml | 20 +- .../targets/nutanix_vms/tasks/main.yml | 22 +- .../nutanix_vms/tasks/negtaive_scenarios.yml | 568 +++++---- .../nutanix_vms/tasks/negtaive_vm_update.yml | 146 ++- .../nutanix_vms/tasks/vm_operations.yml | 134 +- .../targets/nutanix_vms/tasks/vm_update.yml | 124 +- .../nutanix_vms_info/tasks/list_vms.yml | 11 +- .../nutanix_vpcs/tasks/create_vpcs.yml | 15 +- .../targets/nutanix_vpcs/tasks/delete_vpc.yml | 11 +- .../targets/prepare_ndb_env/vars/main.yml | 4 +- 107 files changed, 3594 insertions(+), 3872 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a50429e32..bd19b8418 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -226,7 +226,7 @@ **Features** -**Foundation Cetral**: +**Foundation Central**: - Ansible module for Foundation Central - Ansible module for API Keys to authenticate with FC - Ansible info module for API Keys @@ -332,6 +332,6 @@ - solve python 2.7 issues [\#41](https://github.com/nutanix/nutanix.ansible/pull/41) - device index calculation fixes, updates for get by name functionality[\#254](https://github.com/nutanix/nutanix.ansible/pull/42) - Client SDK with inventory [\#45](https://github.com/nutanix/nutanix.ansible/pull/45) -- Fix error messages for get_uuid() reponse [\#47](https://github.com/nutanix/nutanix.ansible/pull/47) +- Fix error messages for get_uuid() response [\#47](https://github.com/nutanix/nutanix.ansible/pull/47) **Full Changelog**: [here](https://github.com/nutanix/nutanix.ansible/commits/v1.0.0-beta.1) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d34f34be6..578a0c76b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -100,7 +100,7 @@ New Modules - ntnx_ndb_db_servers_info - 
info module for ndb db server vms info - ntnx_ndb_linked_databases - module to manage linked databases of a database instance - ntnx_ndb_maintenance_tasks - module to add and remove maintenance related tasks -- ntnx_ndb_maintenance_window - module to create, update and delete mainetance window +- ntnx_ndb_maintenance_window - module to create, update and delete maintenance window - ntnx_ndb_maintenance_windows_info - module for fetching maintenance windows info - ntnx_ndb_profiles - module for create, update and delete of profiles - ntnx_ndb_profiles_info - info module for ndb profiles diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b2822bfb5..b76ea1545 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -83,7 +83,7 @@ ## Workflow 1. Create a github issue with following details - * **Title** should contain one of the follwoing + * **Title** should contain one of the following - [Feat] Develop ansible module for \ - [Imprv] Modify ansible module to support \ - [Bug] Fix \ bug in \ @@ -106,7 +106,7 @@ * `imprv/issue#` * `bug/issue#` -3. Develop `sanity`, `unit` and `integrtaion` tests. +3. Develop `sanity`, `unit` and `integration` tests. 4. Create a [pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) diff --git a/README.md b/README.md index 76b92b3a0..b8dfb5e49 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,13 @@ # Nutanix Ansible + Official nutanix ansible collection # About + Nutanix ansible collection nutanix.ncp is the official Nutanix ansible collection to automate Nutanix Cloud Platform (ncp). It is designed keeping simplicity as the core value. Hence it is + 1. Easy to use 2. Easy to develop @@ -17,12 +20,15 @@ Ansible Nutanix Provider leverages the community-supported model. 
See [Open Sour # Version compatibility ## Ansible + This collection requires ansible-core>=2.15.0 ## Python + This collection requires Python 3.9 or greater ## Prism Central + > For the 1.1.0 release of the ansible plugin it will have N-2 compatibility with the Prism Central APIs. This release was tested against Prism Central versions pc2022.1.0.2, pc.2021.9.0.5 and pc.2021.8.0.1. > For the 1.2.0 release of the ansible plugin it will have N-2 compatibility with the Prism Central APIs. This release was tested against Prism Central versions pc.2022.4, pc2022.1.0.2 and pc.2021.9.0.5. @@ -43,19 +49,20 @@ This collection requires Python 3.9 or greater > For the 1.9.2 release of the ansible plugin it will have N-1 compatibility with the Prism Central APIs. This release was sanity tested against Prism Central version pc.2024.1 . - ### Notes: + 1. Static routes module (ntnx_static_routes) is supported for PC versions >= pc.2022.1 2. Adding cluster references in projects module (ntnx_projects) is supported for PC versions >= pc.2022.1 3. For Users and User Groups modules (ntnx_users and ntnx_user_groups), adding Identity Provider (IdP) & Organizational Unit (OU) based users/groups are supported for PC versions >= pc.2022.1 -4. ntnx_security_rules - The ``apptier`` option in target group has been removed. New option called ``apptiers`` has been added to support multi tier policy. +4. ntnx_security_rules - The `apptier` option in target group has been removed. New option called `apptiers` has been added to support multi tier policy. Prism Central based examples: https://github.com/nutanix/nutanix.ansible/tree/main/examples/ ## Foundation + > For the 1.1.0 release of the ansible plugin, it will have N-1 compatibility with the Foundation. 
This release was tested against Foundation versions v5.2 and v5.1.1 > For the 1.9.1 release of the ansible plugin, it was tested against versions v5.2 @@ -63,11 +70,13 @@ Prism Central based examples: https://github.com/nutanix/nutanix.ansible/tree/ma Foundation based examples : https://github.com/nutanix/nutanix.ansible/tree/main/examples/foundation ## Foundation Central + > For the 1.1.0 release of the ansible plugin, it will have N-1 compatibility with the Foundation Central . This release was tested against Foundation Central versions v1.3 and v1.2 Foundation Central based examples : https://github.com/nutanix/nutanix.ansible/tree/main/examples/fc ## Karbon + > For the 1.6.0 release of the ansible plugin, it will have N-2 compatibility with the Karbon. This release was tested against Karbon versions v2.3.0, v2.4.0 and v2.5.0 > For the 1.9.0 release of the ansible plugin, it was tested against Karbon versions v2.6.0, v2.7.0 and v2.8.0 @@ -87,9 +96,11 @@ Karbon based examples : https://github.com/nutanix/nutanix.ansible/tree/main/exa NDB based examples : https://github.com/nutanix/nutanix.ansible/tree/main/examples/ndb ### Notes: + 1. Currently NDB based modules are supported and tested against postgres based databases. # Installing the collection + **Prerequisite** Ansible should be pre-installed. If not, please follow official ansible [install guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) . @@ -98,26 +109,28 @@ For Developers, please follow [this install guide]( **1. Clone the GitHub repository to a local directory** -```git clone https://github.com/nutanix/nutanix.ansible.git``` +`git clone https://github.com/nutanix/nutanix.ansible.git` **2. Git checkout release version** -```git checkout -b ``` +`git checkout -b ` **3. Build the collection** -```ansible-galaxy collection build``` +`ansible-galaxy collection build` **4. 
Install the collection** -```ansible-galaxy collection install nutanix-ncp-.tar.gz``` +`ansible-galaxy collection install nutanix-ncp-.tar.gz` **Note** Add `--force` option for rebuilding or reinstalling to overwrite existing data # Using this collection -You can either call modules by their Fully Qualified Collection Namespace (FQCN), such as nutanix.ncp.ntnx_vms, or you can call modules by their short name if you list the nutanix.ncp collection in the playbook's ```collections:``` keyword + +You can either call modules by their Fully Qualified Collection Namespace (FQCN), such as nutanix.ncp.ntnx_vms, or you can call modules by their short name if you list the nutanix.ncp collection in the playbook's `collections:` keyword For example, the playbook for iaas.yml is as follows: + ```yaml --- - name: IaaS Provisioning @@ -143,7 +156,9 @@ For example, the playbook for iaas.yml is as follows: - include_role: name: fip ``` + To run this playbook, use ansible-playbook command as follows: + ``` ansible-playbook ansible-playbook examples/iaas/iaas.yml @@ -153,111 +168,112 @@ ansible-playbook examples/iaas/iaas.yml ## Modules -| Name | Description | -| --- | --- | -| ntnx_acps | Create, Update, Delete acp. | -| ntnx_acps_info | Get acp info. | -| ntnx_address_groups | Create, Update, Delete Nutanix address groups. | -| ntnx_address_groups_info | Get address groups info. | -| ntnx_categories | Create, Update, Delete categories | -| ntnx_categories_info | Get categories info. | -| ntnx_clusters_info | Get cluster info. | -| ntnx_floating_ips | Create or delete a Floating Ip. | -| ntnx_floating_ips_info | List existing Floating_Ips. | -| ntnx_hosts_info | Get host info. | -| ntnx_images | Create, update or delete a image. | -| ntnx_images_info | List existing images. | -| ntnx_image_placement_policy | Create, update or delete a image placement policy. | -| ntnx_image_placement_policies_info | List existing image placement policies. 
| -| ntnx_karbon_clusters | Create, Delete k8s clusters | -| ntnx_karbon_clusters_info | Get clusters info. | -| ntnx_karbon_clusters_node_pools | Update node pools of kubernetes cluster | -| ntnx_karbon_registries | Create, Delete a karbon private registry entry | -| ntnx_karbon_registries_info | Get karbon private registry registry info. | -| ntnx_pbrs | Create or delete a PBR. | -| ntnx_pbrs_info | List existing PBRs. | -| ntnx_permissions_info | List permissions info | -| ntnx_projects | create, update and delete pc projects | -| ntnx_projects_info | Get projects info. | -| ntnx_protection_rules | create, update and delete pc protection rules | -| ntnx_protection_rules_info | Get pc protection rules info. | -| ntnx_recovery_plans | create, update and delete pc recovery plans | -| ntnx_recovery_plans_info | Get pc recovery plans info. | -| ntnx_recovery_plan_jobs | create and perform action on pc recovery plans | -| ntnx_recovery_plan_jobs_info | Get pc recovery plan jobs info. | -| ntnx_roles | Create, Update, Delete Nutanix roles | -| ntnx_roles_info | Get roles info. | -| ntnx_security_rules | Create, update or delete a Security Rule. | -| ntnx_security_rules_info | List existing Security Rules. | -| ntnx_service_groups | Create, Update, Delete service_group | -| ntnx_service_groups_info | Get service groups info. | -| ntnx_static_routes | Update static routes of a vpc. | -| ntnx_static_routes_info | List existing static routes of a vpc. | -| ntnx_subnets | Create or delete a Subnet. | -| ntnx_subnets_info | List existing Subnets. | -| ntnx_user_groups | Create, Delete user_groups. | -| ntnx_user_groups_info | Get user groups info. | -| ntnx_users | Create, Delete users | -| ntnx_users_info | Get users info. | -| ntnx_vms | Create or delete a VM. | -| ntnx_vms_clone | Clone VM. | -| ntnx_vms_ova | Create OVA image from VM. | -| ntnx_vms_info | List existing VMs. | -| ntnx_vpcs | Create or delete a VPC. | -| ntnx_vpcs_info | List existing VPCs. 
| -| ntnx_foundation | Image nodes and create new cluster. | -| ntnx_foundation_aos_packages_info | List the AOS packages uploaded to Foundation. | -| ntnx_foundation_bmc_ipmi_config | Configure IPMI IP address on BMC of nodes. | -| ntnx_foundation_discover_nodes_info | List the nodes discovered by Foundation. | -| ntnx_foundation_hypervisor_images_info | List the hypervisor images uploaded to Foundation. | -| ntnx_foundation_image_upload | Upload hypervisor or AOS image to Foundation VM. | -| ntnx_foundation_node_network_info | Get node network information discovered by Foundation. | -| ntnx_foundation_central | Create a cluster out of nodes registered with Foundation Central. | -| ntnx_foundation_central_api_keys | Create a new api key which will be used by remote nodes to authenticate with Foundation Central. | -| ntnx_foundation_central_api_keys_info | List all the api keys created in Foundation Central. | -| ntnx_foundation_central_imaged_clusters_info | List all the clusters created using Foundation Central. | -| ntnx_foundation_central_imaged_nodes_info | List all the nodes registered with Foundation Central. | -| ntnx_ndb_databases_info | Get ndb database instance info | -| ntnx_ndb_clones_info | Get ndb database clones info. | -| ntnx_ndb_time_machines_info | Get ndb time machines info. | -| ntnx_ndb_profiles_info | Get ndb profiles info. | -| ntnx_ndb_db_servers_info | Get ndb database server vms info. | -| ntnx_ndb_databases | Create, update and delete database instances. | -| ntnx_ndb_register_database | Register database instance. | -| ntnx_ndb_db_server_vms | Create, update and delete database server vms. | -| ntnx_ndb_clusters_info | Get clusters info. 
| -| ntnx_ndb_clusters | Create, update and delete clusters in NDB | -| ntnx_ndb_snapshots_info | Get snapshots info | -| ntnx_ndb_vlans | Create, update and delete vlans | -| ntnx_ndb_vlans_info | Get vlans info in NDB | -| ntnx_ndb_stretched_vlans | Get stretched vlans inf in NDB | -| ntnx_ndb_time_machine_clusters | Manage clusters in NDB time machines | -| ntnx_ndb_tags | Create, update and delete tags | -| ntnx_ndb_tags_info | Get tags info | -| ntnx_ndb_database_clones | Create, update and delete database clones | -| ntnx_ndb_database_snapshots | Create, update and delete database snapshots | -| ntnx_ndb_database_clone_refresh | Perform database clone refresh | -| ntnx_ndb_authorize_db_server_vms | authorize database server vms with time machines | -| ntnx_ndb_profiles | create, update and delete all kind of profiles | -| ntnx_ndb_database_log_catchup | perform log catchup | -| ntnx_ndb_database_restore | perform database restore | -| ntnx_ndb_database_scale | perform database scaling | -| ntnx_ndb_linked_databases | Add and remove linked databases of database instance | -| ntnx_ndb_replicate_database_snapshots | replicate snapshots accross clusters in time machines | -| ntnx_ndb_register_db_server_vm | register database server vm | -| ntnx_ndb_maintenance_tasks | Add and remove maintenance tasks in window | -| ntnx_ndb_maintenance_window | Create, update and delete maintenance window | -| ntnx_ndb_maintenance_windows_info | Get maintenance window info | -| ntnx_ndb_slas | Create, update and delete sla | -| ntnx_ndb_slas_info | Get slas info | +| Name | Description | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| ntnx_acps | Create, Update, Delete acp. | +| ntnx_acps_info | Get acp info. | +| ntnx_address_groups | Create, Update, Delete Nutanix address groups. | +| ntnx_address_groups_info | Get address groups info. 
| +| ntnx_categories | Create, Update, Delete categories | +| ntnx_categories_info | Get categories info. | +| ntnx_clusters_info | Get cluster info. | +| ntnx_floating_ips | Create or delete a Floating Ip. | +| ntnx_floating_ips_info | List existing Floating_Ips. | +| ntnx_hosts_info | Get host info. | +| ntnx_images | Create, update or delete a image. | +| ntnx_images_info | List existing images. | +| ntnx_image_placement_policy | Create, update or delete a image placement policy. | +| ntnx_image_placement_policies_info | List existing image placement policies. | +| ntnx_karbon_clusters | Create, Delete k8s clusters | +| ntnx_karbon_clusters_info | Get clusters info. | +| ntnx_karbon_clusters_node_pools | Update node pools of kubernetes cluster | +| ntnx_karbon_registries | Create, Delete a karbon private registry entry | +| ntnx_karbon_registries_info | Get karbon private registry registry info. | +| ntnx_pbrs | Create or delete a PBR. | +| ntnx_pbrs_info | List existing PBRs. | +| ntnx_permissions_info | List permissions info | +| ntnx_projects | create, update and delete pc projects | +| ntnx_projects_info | Get projects info. | +| ntnx_protection_rules | create, update and delete pc protection rules | +| ntnx_protection_rules_info | Get pc protection rules info. | +| ntnx_recovery_plans | create, update and delete pc recovery plans | +| ntnx_recovery_plans_info | Get pc recovery plans info. | +| ntnx_recovery_plan_jobs | create and perform action on pc recovery plans | +| ntnx_recovery_plan_jobs_info | Get pc recovery plan jobs info. | +| ntnx_roles | Create, Update, Delete Nutanix roles | +| ntnx_roles_info | Get roles info. | +| ntnx_security_rules | Create, update or delete a Security Rule. | +| ntnx_security_rules_info | List existing Security Rules. | +| ntnx_service_groups | Create, Update, Delete service_group | +| ntnx_service_groups_info | Get service groups info. | +| ntnx_static_routes | Update static routes of a vpc. 
| +| ntnx_static_routes_info | List existing static routes of a vpc. | +| ntnx_subnets | Create or delete a Subnet. | +| ntnx_subnets_info | List existing Subnets. | +| ntnx_user_groups | Create, Delete user_groups. | +| ntnx_user_groups_info | Get user groups info. | +| ntnx_users | Create, Delete users | +| ntnx_users_info | Get users info. | +| ntnx_vms | Create or delete a VM. | +| ntnx_vms_clone | Clone VM. | +| ntnx_vms_ova | Create OVA image from VM. | +| ntnx_vms_info | List existing VMs. | +| ntnx_vpcs | Create or delete a VPC. | +| ntnx_vpcs_info | List existing VPCs. | +| ntnx_foundation | Image nodes and create new cluster. | +| ntnx_foundation_aos_packages_info | List the AOS packages uploaded to Foundation. | +| ntnx_foundation_bmc_ipmi_config | Configure IPMI IP address on BMC of nodes. | +| ntnx_foundation_discover_nodes_info | List the nodes discovered by Foundation. | +| ntnx_foundation_hypervisor_images_info | List the hypervisor images uploaded to Foundation. | +| ntnx_foundation_image_upload | Upload hypervisor or AOS image to Foundation VM. | +| ntnx_foundation_node_network_info | Get node network information discovered by Foundation. | +| ntnx_foundation_central | Create a cluster out of nodes registered with Foundation Central. | +| ntnx_foundation_central_api_keys | Create a new api key which will be used by remote nodes to authenticate with Foundation Central. | +| ntnx_foundation_central_api_keys_info | List all the api keys created in Foundation Central. | +| ntnx_foundation_central_imaged_clusters_info | List all the clusters created using Foundation Central. | +| ntnx_foundation_central_imaged_nodes_info | List all the nodes registered with Foundation Central. | +| ntnx_ndb_databases_info | Get ndb database instance info | +| ntnx_ndb_clones_info | Get ndb database clones info. | +| ntnx_ndb_time_machines_info | Get ndb time machines info. | +| ntnx_ndb_profiles_info | Get ndb profiles info. 
| +| ntnx_ndb_db_servers_info | Get ndb database server vms info. | +| ntnx_ndb_databases | Create, update and delete database instances. | +| ntnx_ndb_register_database | Register database instance. | +| ntnx_ndb_db_server_vms | Create, update and delete database server vms. | +| ntnx_ndb_clusters_info | Get clusters info. | +| ntnx_ndb_clusters | Create, update and delete clusters in NDB | +| ntnx_ndb_snapshots_info | Get snapshots info | +| ntnx_ndb_vlans | Create, update and delete vlans | +| ntnx_ndb_vlans_info | Get vlans info in NDB | +| ntnx_ndb_stretched_vlans | Get stretched vlans inf in NDB | +| ntnx_ndb_time_machine_clusters | Manage clusters in NDB time machines | +| ntnx_ndb_tags | Create, update and delete tags | +| ntnx_ndb_tags_info | Get tags info | +| ntnx_ndb_database_clones | Create, update and delete database clones | +| ntnx_ndb_database_snapshots | Create, update and delete database snapshots | +| ntnx_ndb_database_clone_refresh | Perform database clone refresh | +| ntnx_ndb_authorize_db_server_vms | authorize database server vms with time machines | +| ntnx_ndb_profiles | create, update and delete all kind of profiles | +| ntnx_ndb_database_log_catchup | perform log catchup | +| ntnx_ndb_database_restore | perform database restore | +| ntnx_ndb_database_scale | perform database scaling | +| ntnx_ndb_linked_databases | Add and remove linked databases of database instance | +| ntnx_ndb_replicate_database_snapshots | replicate snapshots across clusters in time machines | +| ntnx_ndb_register_db_server_vm | register database server vm | +| ntnx_ndb_maintenance_tasks | Add and remove maintenance tasks in window | +| ntnx_ndb_maintenance_window | Create, update and delete maintenance window | +| ntnx_ndb_maintenance_windows_info | Get maintenance window info | +| ntnx_ndb_slas | Create, update and delete sla | +| ntnx_ndb_slas_info | Get slas info | ## Inventory Plugins -| Name | Description | -| --- | --- | +| Name | Description | +| 
----------------------- | ---------------------------- | | ntnx_prism_vm_inventory | Nutanix VMs inventory source | # Module documentation and examples + ``` ansible-doc nutanix.ncp. ``` @@ -266,8 +282,8 @@ ansible-doc nutanix.ncp. We glady welcome contributions from the community. From updating the documentation to adding more functions for Ansible, all ideas are welcome. Thank you in advance for all of your issues, pull requests, and comments! -* [Contributing Guide](CONTRIBUTING.md) -* [Code of Conduct](CODE_OF_CONDUCT.md) +- [Contributing Guide](CONTRIBUTING.md) +- [Code of Conduct](CODE_OF_CONDUCT.md) # Testing @@ -276,10 +292,12 @@ We glady welcome contributions from the community. From updating the documentati To conduct integration tests for a specific Ansible module such as the `ntnx_vms` module, the following step-by-step procedures can be followed: ### Prerequisites + - Ensure you are in the installed collection directory where the module is located. For example: -`/Users/mac.user1/.ansible/collections/ansible_collections/nutanix/ncp` + `/Users/mac.user1/.ansible/collections/ansible_collections/nutanix/ncp` ### Setting up Variables + 1. Navigate to the `tests/integration/targets` directory within the collection. 2. Define the necessary variables within the feature-specific var files, such as `tests/integration/targets/prepare_env/vars/main.yml`, `tests/integration/targets/prepare_foundation_env/vars/main.yml`,`tests/integration/targets/prepare_ndb_env/tasks/prepare_env.yml`, etc. @@ -287,39 +305,43 @@ To conduct integration tests for a specific Ansible module such as the `ntnx_vms Note: For Karbon and FC tests, use the PC vars exclusively, as these features rely on pc setup. Not all variables are mandatory; define only the required variables for the particular feature to be tested. 3. 
Run the test setup playbook for the specific feature you intend to test to create entities in setup: - - For PC, NDB, and Foundation tests, execute the relevant commands: - ```bash - ansible-playbook prepare_env/tasks/prepare_env.yml - ansible-playbook prepare_ndb_env/tasks/prepare_env.yml - ansible-playbook prepare_foundation_env/tasks/prepare_foundation_env.yml - ``` + - For PC, NDB, and Foundation tests, execute the relevant commands: + ```bash + ansible-playbook prepare_env/tasks/prepare_env.yml + ansible-playbook prepare_ndb_env/tasks/prepare_env.yml + ansible-playbook prepare_foundation_env/tasks/prepare_foundation_env.yml + ``` ### Running Integration Tests + 1. Conduct integration tests for all modules using: - ```bash - ansible-test integration - ``` + + ```bash + ansible-test integration + ``` 2. To perform integration tests for a specific module: - ```bash - ansible-test integration module_test_name - ``` - Replace `module_test_name` with test directory name under tests/integration/targets. + ```bash + ansible-test integration module_test_name + ``` + Replace `module_test_name` with test directory name under tests/integration/targets. ### Cleanup + 1. After completing the integration tests, perform a cleanup specific to the tested feature: - - For PC tests, execute the command: - ```bash - ansible-playbook prepare_env/tasks/clean_up.yml - ``` - - For Foundation tests, execute the command: - ```bash - ansible-playbook prepare_foundation_env/tasks/clean_up.yml - ``` + - For PC tests, execute the command: + ```bash + ansible-playbook prepare_env/tasks/clean_up.yml + ``` + - For Foundation tests, execute the command: + ```bash + ansible-playbook prepare_foundation_env/tasks/clean_up.yml + ``` By following these steps, you can perform comprehensive integration testing for the specified Ansible module and ensure a clean testing environment afterward. Define only the necessary variables for the specific feature you intend to test. 
# Examples + ## Playbook for IaaS provisioning on Nutanix **Refer to [`examples/iaas`](https://github.com/nutanix/nutanix.ansible/tree/main/examples/iaas) for full implementation** @@ -332,46 +354,122 @@ By following these steps, you can perform comprehensive integration testing for collections: - nutanix.ncp vars: - nutanix_host: - nutanix_username: - nutanix_password: - validate_certs: true + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: true tasks: - name: Inputs for external subnets task include_tasks: external_subnet.yml with_items: - - { name: Ext-Nat, vlan_id: 102, ip: 10.44.3.192, prefix: 27, gip: 10.44.3.193, sip: 10.44.3.198, eip: 10.44.3.207, eNat: True } + - { + name: Ext-Nat, + vlan_id: 102, + ip: 10.44.3.192, + prefix: 27, + gip: 10.44.3.193, + sip: 10.44.3.198, + eip: 10.44.3.207, + eNat: True, + } - name: Inputs for vpcs task include_tasks: vpc.yml with_items: - - { name: Prod, subnet_name: Ext-Nat} - - { name: Dev, subnet_name: Ext-Nat} + - { name: Prod, subnet_name: Ext-Nat } + - { name: Dev, subnet_name: Ext-Nat } - name: Inputs for overlay subnets include_tasks: overlay_subnet.yml with_items: - - { name: Prod-SubnetA, vpc_name: Prod , nip: 10.1.1.0, prefix: 24, gip: 10.1.1.1, sip: 10.1.1.2, eip: 10.1.1.5, - domain_name: "calm.nutanix.com", dns_servers : ["8.8.8.8","8.8.8.4"], domain_search: ["calm.nutanix.com","eng.nutanix.com"] } - - { name: Prod-SubnetB, vpc_name: Prod , nip: 10.1.2.0, prefix: 24, gip: 10.1.2.1, sip: 10.1.2.2, eip: 10.1.2.5, - domain_name: "calm.nutanix.com", dns_servers : ["8.8.8.8","8.8.8.4"], domain_search: ["calm.nutanix.com","eng.nutanix.com"] } - - { name: Dev-SubnetA, vpc_name: Dev , nip: 10.1.1.0, prefix: 24, gip: 10.1.1.1, sip: 10.1.1.2, eip: 10.1.1.5, - domain_name: "calm.nutanix.com", dns_servers : ["8.8.8.8","8.8.8.4"], domain_search: ["calm.nutanix.com","eng.nutanix.com"] } - - { name: Dev-SubnetB, vpc_name: Dev , nip: 10.1.2.0, prefix: 24, gip: 10.1.2.1, sip: 10.1.2.2, eip: 10.1.2.5, - 
domain_name: "calm.nutanix.com", dns_servers : ["8.8.8.8","8.8.8.4"], domain_search: ["calm.nutanix.com","eng.nutanix.com"] } + - { + name: Prod-SubnetA, + vpc_name: Prod, + nip: 10.1.1.0, + prefix: 24, + gip: 10.1.1.1, + sip: 10.1.1.2, + eip: 10.1.1.5, + domain_name: "calm.nutanix.com", + dns_servers: ["8.8.8.8", "8.8.8.4"], + domain_search: ["calm.nutanix.com", "eng.nutanix.com"], + } + - { + name: Prod-SubnetB, + vpc_name: Prod, + nip: 10.1.2.0, + prefix: 24, + gip: 10.1.2.1, + sip: 10.1.2.2, + eip: 10.1.2.5, + domain_name: "calm.nutanix.com", + dns_servers: ["8.8.8.8", "8.8.8.4"], + domain_search: ["calm.nutanix.com", "eng.nutanix.com"], + } + - { + name: Dev-SubnetA, + vpc_name: Dev, + nip: 10.1.1.0, + prefix: 24, + gip: 10.1.1.1, + sip: 10.1.1.2, + eip: 10.1.1.5, + domain_name: "calm.nutanix.com", + dns_servers: ["8.8.8.8", "8.8.8.4"], + domain_search: ["calm.nutanix.com", "eng.nutanix.com"], + } + - { + name: Dev-SubnetB, + vpc_name: Dev, + nip: 10.1.2.0, + prefix: 24, + gip: 10.1.2.1, + sip: 10.1.2.2, + eip: 10.1.2.5, + domain_name: "calm.nutanix.com", + dns_servers: ["8.8.8.8", "8.8.8.4"], + domain_search: ["calm.nutanix.com", "eng.nutanix.com"], + } - name: Inputs for vm task include_tasks: vm.yml with_items: - - {name: "Prod-Wordpress-App", desc: "Prod-Wordpress-App", is_connected: True , subnet_name: Prod-SubnetA, image_name: "wordpress-appserver", private_ip: ""} - - {name: "Prod-Wordpress-DB", desc: "Prod-Wordpress-DB", is_connected: True , subnet_name: Prod-SubnetB, image_name: "wordpress-db", private_ip: 10.1.2.5} - - {name: "Dev-Wordpress-App", desc: "Dev-Wordpress-App", is_connected: True , subnet_name: Dev-SubnetA, image_name: "wordpress-appserver", private_ip: ""} - - {name: "Dev-Wordpress-DB", desc: "Dev-Wordpress-DB", is_connected: True , subnet_name: Dev-SubnetB, image_name: "wordpress-db",private_ip: 10.1.2.5} + - { + name: "Prod-Wordpress-App", + desc: "Prod-Wordpress-App", + is_connected: True, + subnet_name: Prod-SubnetA, + image_name: 
"wordpress-appserver", + private_ip: "", + } + - { + name: "Prod-Wordpress-DB", + desc: "Prod-Wordpress-DB", + is_connected: True, + subnet_name: Prod-SubnetB, + image_name: "wordpress-db", + private_ip: 10.1.2.5, + } + - { + name: "Dev-Wordpress-App", + desc: "Dev-Wordpress-App", + is_connected: True, + subnet_name: Dev-SubnetA, + image_name: "wordpress-appserver", + private_ip: "", + } + - { + name: "Dev-Wordpress-DB", + desc: "Dev-Wordpress-DB", + is_connected: True, + subnet_name: Dev-SubnetB, + image_name: "wordpress-db", + private_ip: 10.1.2.5, + } - name: Inputs for Floating IP task include_tasks: fip.yml with_items: - - {vm_name: "Prod-Wordpress-App"} - - {vm_name: "Dev-Wordpress-App"} - + - { vm_name: "Prod-Wordpress-App" } + - { vm_name: "Dev-Wordpress-App" } ``` diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 4d6506826..1b0d077bc 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -3,457 +3,469 @@ releases: 1.0.0: changes: bugfixes: - - Creating a VM based on a disk_image without specifying the size_gb - - icmp "any" code value in module PBR + - Creating a VM based on a disk_image without specifying the size_gb + - icmp "any" code value in module PBR minor_changes: - - Add meta file for collection - - Allow environment variables for nutanix connection parameters - release_date: '2022-03-02' + - Add meta file for collection + - Allow environment variables for nutanix connection parameters + release_date: "2022-03-02" 1.0.0-beta.1: changes: bugfixes: - - Client SDK with inventory [\#45](https://github.com/nutanix/nutanix.ansible/pull/45) - - Fix error messages for get_uuid() reponse [\#47](https://github.com/nutanix/nutanix.ansible/pull/47) - - black fixes [\#30](https://github.com/nutanix/nutanix.ansible/pull/30) - - black fixes [\#32](https://github.com/nutanix/nutanix.ansible/pull/32) - - clear unused files and argument [\#29](https://github.com/nutanix/nutanix.ansible/pull/29) - - device index calculation 
fixes, updates for get by name functionality[\#254](https://github.com/nutanix/nutanix.ansible/pull/42) - - fixes to get spec from collection [\#17](https://github.com/nutanix/nutanix.ansible/pull/17) - - solve python 2.7 issues [\#41](https://github.com/nutanix/nutanix.ansible/pull/41) - - updates for guest customization spec [\#20](https://github.com/nutanix/nutanix.ansible/pull/20) + - Client SDK with inventory [\#45](https://github.com/nutanix/nutanix.ansible/pull/45) + - Fix error messages for get_uuid() response [\#47](https://github.com/nutanix/nutanix.ansible/pull/47) + - black fixes [\#30](https://github.com/nutanix/nutanix.ansible/pull/30) + - black fixes [\#32](https://github.com/nutanix/nutanix.ansible/pull/32) + - clear unused files and argument [\#29](https://github.com/nutanix/nutanix.ansible/pull/29) + - device index calculation fixes, updates for get by name functionality[\#254](https://github.com/nutanix/nutanix.ansible/pull/42) + - fixes to get spec from collection [\#17](https://github.com/nutanix/nutanix.ansible/pull/17) + - solve python 2.7 issues [\#41](https://github.com/nutanix/nutanix.ansible/pull/41) + - updates for guest customization spec [\#20](https://github.com/nutanix/nutanix.ansible/pull/20) major_changes: - - CICD pipeline using GitHub actions + - CICD pipeline using GitHub actions modules: - - description: Nutanix module for vms - name: ntnx_vms - namespace: '' - release_date: '2022-01-28' + - description: Nutanix module for vms + name: ntnx_vms + namespace: "" + release_date: "2022-01-28" 1.0.0-beta.2: changes: bugfixes: - - Bug/cluster UUID issue68 [\#72](https://github.com/nutanix/nutanix.ansible/pull/72) - - Fix/integ [\#96](https://github.com/nutanix/nutanix.ansible/pull/96) - - Sanity and python fix [\#46](https://github.com/nutanix/nutanix.ansible/pull/46) - - Task/fix failing sanity [\#117](https://github.com/nutanix/nutanix.ansible/pull/117) - - clean up pbrs.py 
[\#113](https://github.com/nutanix/nutanix.ansible/pull/113) - - code cleanup - fix github issue#59 [\#60](https://github.com/nutanix/nutanix.ansible/pull/60) - - fix project name [\#107](https://github.com/nutanix/nutanix.ansible/pull/107) - - fixed variables names issue74 [\#77](https://github.com/nutanix/nutanix.ansible/pull/77) + - Bug/cluster UUID issue68 [\#72](https://github.com/nutanix/nutanix.ansible/pull/72) + - Fix/integ [\#96](https://github.com/nutanix/nutanix.ansible/pull/96) + - Sanity and python fix [\#46](https://github.com/nutanix/nutanix.ansible/pull/46) + - Task/fix failing sanity [\#117](https://github.com/nutanix/nutanix.ansible/pull/117) + - clean up pbrs.py [\#113](https://github.com/nutanix/nutanix.ansible/pull/113) + - code cleanup - fix github issue#59 [\#60](https://github.com/nutanix/nutanix.ansible/pull/60) + - fix project name [\#107](https://github.com/nutanix/nutanix.ansible/pull/107) + - fixed variables names issue74 [\#77](https://github.com/nutanix/nutanix.ansible/pull/77) minor_changes: - - Codegen - Ansible code generator - - Imprv cluster uuid [\#75](https://github.com/nutanix/nutanix.ansible/pull/75) - - Imprv/code coverage [\#97](https://github.com/nutanix/nutanix.ansible/pull/97) - - Imprv/vpcs network prefix [\#81](https://github.com/nutanix/nutanix.ansible/pull/81) + - Codegen - Ansible code generator + - Imprv cluster uuid [\#75](https://github.com/nutanix/nutanix.ansible/pull/75) + - Imprv/code coverage [\#97](https://github.com/nutanix/nutanix.ansible/pull/97) + - Imprv/vpcs network prefix [\#81](https://github.com/nutanix/nutanix.ansible/pull/81) modules: - - description: Nutanix module for floating Ips - name: ntnx_floating_ips - namespace: '' - - description: Nutanix module for policy based routing - name: ntnx_pbrs - namespace: '' - - description: Nutanix module for subnets - name: ntnx_subnets - namespace: '' - - description: Nutanix module for vpcs - name: ntnx_vpcs - namespace: '' - release_date: '2022-02-22' + 
- description: Nutanix module for floating Ips + name: ntnx_floating_ips + namespace: "" + - description: Nutanix module for policy based routing + name: ntnx_pbrs + namespace: "" + - description: Nutanix module for subnets + name: ntnx_subnets + namespace: "" + - description: Nutanix module for vpcs + name: ntnx_vpcs + namespace: "" + release_date: "2022-02-22" 1.1.0: changes: minor_changes: - - Added integration tests for foundation and foundation central + - Added integration tests for foundation and foundation central 1.1.0-beta.1: modules: - - description: Nutanix module to image nodes and optionally create clusters - name: ntnx_foundation - namespace: '' - - description: Nutanix module which configures IPMI IP address on BMC of nodes. - name: ntnx_foundation_bmc_ipmi_config - namespace: '' - - description: Nutanix module which returns nodes discovered by Foundation - name: ntnx_foundation_discover_nodes_info - namespace: '' - - description: Nutanix module which returns the hypervisor images uploaded to - Foundation - name: ntnx_foundation_hypervisor_images_info - namespace: '' - - description: Nutanix module which uploads hypervisor or AOS image to foundation - vm. - name: ntnx_foundation_image_upload - namespace: '' - - description: Nutanix module which returns node network information discovered - by Foundation - name: ntnx_foundation_node_network_info - namespace: '' - release_date: '2022-04-11' + - description: Nutanix module to image nodes and optionally create clusters + name: ntnx_foundation + namespace: "" + - description: Nutanix module which configures IPMI IP address on BMC of nodes. 
+ name: ntnx_foundation_bmc_ipmi_config + namespace: "" + - description: Nutanix module which returns nodes discovered by Foundation + name: ntnx_foundation_discover_nodes_info + namespace: "" + - description: + Nutanix module which returns the hypervisor images uploaded to + Foundation + name: ntnx_foundation_hypervisor_images_info + namespace: "" + - description: + Nutanix module which uploads hypervisor or AOS image to foundation + vm. + name: ntnx_foundation_image_upload + namespace: "" + - description: + Nutanix module which returns node network information discovered + by Foundation + name: ntnx_foundation_node_network_info + namespace: "" + release_date: "2022-04-11" 1.1.0-beta.2: modules: - - description: Nutanix module to imaged Nodes and optionally create cluster - name: ntnx_foundation_central - namespace: '' - - description: Nutanix module which creates api key for foundation central - name: ntnx_foundation_central_api_keys - namespace: '' - - description: Nutanix module which returns the api key - name: ntnx_foundation_central_api_keys_info - namespace: '' - - description: Nutanix module which returns the imaged clusters within the Foudation - Central - name: ntnx_foundation_central_imaged_clusters_info - namespace: '' - - description: Nutanix module which returns the imaged nodes within the Foudation - Central - name: ntnx_foundation_central_imaged_nodes_info - namespace: '' - release_date: '2022-04-28' + - description: Nutanix module to imaged Nodes and optionally create cluster + name: ntnx_foundation_central + namespace: "" + - description: Nutanix module which creates api key for foundation central + name: ntnx_foundation_central_api_keys + namespace: "" + - description: Nutanix module which returns the api key + name: ntnx_foundation_central_api_keys_info + namespace: "" + - description: + Nutanix module which returns the imaged clusters within the Foundation + Central + name: ntnx_foundation_central_imaged_clusters_info + namespace: "" + - 
description: + Nutanix module which returns the imaged nodes within the Foundation + Central + name: ntnx_foundation_central_imaged_nodes_info + namespace: "" + release_date: "2022-04-28" 1.2.0: changes: minor_changes: - - VM's update functionality + - VM's update functionality modules: - - description: Nutanix info module for floating Ips - name: ntnx_floating_ips_info - namespace: '' - - description: Nutanix info module for policy based routing - name: ntnx_pbrs_info - namespace: '' - - description: Nutanix info module for subnets - name: ntnx_subnets_info - namespace: '' - - description: VM module which supports VM clone operations - name: ntnx_vms_clone - namespace: '' - - description: Nutanix info module for vms - name: ntnx_vms_info - namespace: '' - - description: VM module which supports ova creation - name: ntnx_vms_ova - namespace: '' - - description: Nutanix info module for vpcs - name: ntnx_vpcs_info - namespace: '' - release_date: '2022-06-03' + - description: Nutanix info module for floating Ips + name: ntnx_floating_ips_info + namespace: "" + - description: Nutanix info module for policy based routing + name: ntnx_pbrs_info + namespace: "" + - description: Nutanix info module for subnets + name: ntnx_subnets_info + namespace: "" + - description: VM module which supports VM clone operations + name: ntnx_vms_clone + namespace: "" + - description: Nutanix info module for vms + name: ntnx_vms_info + namespace: "" + - description: VM module which supports ova creation + name: ntnx_vms_ova + namespace: "" + - description: Nutanix info module for vpcs + name: ntnx_vpcs_info + namespace: "" + release_date: "2022-06-03" 1.3.0: modules: - - description: image placement policies info module - name: ntnx_image_placement_policies_info - namespace: '' - - description: image placement policy module which supports Create, update and - delete operations - name: ntnx_image_placement_policy - namespace: '' - - description: images module which supports pc images 
management CRUD operations - name: ntnx_images - namespace: '' - - description: images info module - name: ntnx_images_info - namespace: '' - - description: security_rule module which suports security_rule CRUD operations - name: ntnx_security_rules - namespace: '' - - description: security_rule info module - name: ntnx_security_rules_info - namespace: '' - - description: vpc static routes - name: ntnx_static_routes - namespace: '' - - description: vpc static routes info module - name: ntnx_static_routes_info - namespace: '' - release_date: '2022-07-04' + - description: image placement policies info module + name: ntnx_image_placement_policies_info + namespace: "" + - description: + image placement policy module which supports Create, update and + delete operations + name: ntnx_image_placement_policy + namespace: "" + - description: images module which supports pc images management CRUD operations + name: ntnx_images + namespace: "" + - description: images info module + name: ntnx_images_info + namespace: "" + - description: security_rule module which supports security_rule CRUD operations + name: ntnx_security_rules + namespace: "" + - description: security_rule info module + name: ntnx_security_rules_info + namespace: "" + - description: vpc static routes + name: ntnx_static_routes + namespace: "" + - description: vpc static routes info module + name: ntnx_static_routes_info + namespace: "" + release_date: "2022-07-04" 1.4.0: changes: bugfixes: - - Fix examples of info modules [\#226](https://github.com/nutanix/nutanix.ansible/issues/226) + - Fix examples of info modules [\#226](https://github.com/nutanix/nutanix.ansible/issues/226) modules: - - description: acp module which suports acp Create, update and delete operations - name: ntnx_acps - namespace: '' - - description: acp info module - name: ntnx_acps_info - namespace: '' - - description: module which supports address groups CRUD operations - name: ntnx_address_groups - namespace: '' - - description: address 
groups info module - name: ntnx_address_groups_info - namespace: '' - - description: category module which supports pc category management CRUD operations - name: ntnx_categories - namespace: '' - - description: categories info module - name: ntnx_categories_info - namespace: '' - - description: cluster info module - name: ntnx_clusters_info - namespace: '' - - description: host info module - name: ntnx_hosts_info - namespace: '' - - description: permissions info module - name: ntnx_permissions_info - namespace: '' - - description: module for create, update and delete pc projects - name: ntnx_projects - namespace: '' - - description: projects info module - name: ntnx_projects_info - namespace: '' - - description: module which supports role CRUD operations - name: ntnx_roles - namespace: '' - - description: role info module - name: ntnx_roles_info - namespace: '' - - description: service_groups module which suports service_groups CRUD operations - name: ntnx_service_groups - namespace: '' - - description: service_group info module - name: ntnx_service_groups_info - namespace: '' - - description: user_groups module which supports pc user_groups management create - delete operations - name: ntnx_user_groups - namespace: '' - - description: User Groups info module - name: ntnx_user_groups_info - namespace: '' - - description: users module which supports pc users management create delete operations - name: ntnx_users - namespace: '' - - description: users info module - name: ntnx_users_info - namespace: '' - release_date: '2022-07-28' + - description: acp module which supports acp Create, update and delete operations + name: ntnx_acps + namespace: "" + - description: acp info module + name: ntnx_acps_info + namespace: "" + - description: module which supports address groups CRUD operations + name: ntnx_address_groups + namespace: "" + - description: address groups info module + name: ntnx_address_groups_info + namespace: "" + - description: category module which supports 
pc category management CRUD operations + name: ntnx_categories + namespace: "" + - description: categories info module + name: ntnx_categories_info + namespace: "" + - description: cluster info module + name: ntnx_clusters_info + namespace: "" + - description: host info module + name: ntnx_hosts_info + namespace: "" + - description: permissions info module + name: ntnx_permissions_info + namespace: "" + - description: module for create, update and delete pc projects + name: ntnx_projects + namespace: "" + - description: projects info module + name: ntnx_projects_info + namespace: "" + - description: module which supports role CRUD operations + name: ntnx_roles + namespace: "" + - description: role info module + name: ntnx_roles_info + namespace: "" + - description: service_groups module which supports service_groups CRUD operations + name: ntnx_service_groups + namespace: "" + - description: service_group info module + name: ntnx_service_groups_info + namespace: "" + - description: + user_groups module which supports pc user_groups management create + delete operations + name: ntnx_user_groups + namespace: "" + - description: User Groups info module + name: ntnx_user_groups_info + namespace: "" + - description: users module which supports pc users management create delete operations + name: ntnx_users + namespace: "" + - description: users info module + name: ntnx_users_info + namespace: "" + release_date: "2022-07-28" 1.5.0: modules: - - description: Nutanix module for protection rules - name: ntnx_protection_rules - namespace: '' - - description: Nutanix info module for protection rules - name: ntnx_protection_rules_info - namespace: '' - - description: Nutanix module for recovery plan jobs - name: ntnx_recovery_plan_jobs - namespace: '' - - description: Nutanix info module for protection - name: ntnx_recovery_plan_jobs_info - namespace: '' - - description: Nutanix module for recovery plan - name: ntnx_recovery_plans - namespace: '' - - description: Nutanix info 
module for recovery plan - name: ntnx_recovery_plans_info - namespace: '' + - description: Nutanix module for protection rules + name: ntnx_protection_rules + namespace: "" + - description: Nutanix info module for protection rules + name: ntnx_protection_rules_info + namespace: "" + - description: Nutanix module for recovery plan jobs + name: ntnx_recovery_plan_jobs + namespace: "" + - description: Nutanix info module for protection + name: ntnx_recovery_plan_jobs_info + namespace: "" + - description: Nutanix module for recovery plan + name: ntnx_recovery_plans + namespace: "" + - description: Nutanix info module for recovery plan + name: ntnx_recovery_plans_info + namespace: "" 1.6.0: modules: - - description: Nutanix module for karbon clusters - name: ntnx_karbon_clusters - namespace: '' - - description: Nutanix info module for karbon clusters with kubeconifg and ssh - config - name: ntnx_karbon_clusters_info - namespace: '' - - description: Nutanix module for karbon private registry - name: ntnx_karbon_registries - namespace: '' - - description: Nutanix info module for karbon private registry - name: ntnx_karbon_registries_info - namespace: '' - release_date: '2022-09-09' + - description: Nutanix module for karbon clusters + name: ntnx_karbon_clusters + namespace: "" + - description: + Nutanix info module for karbon clusters with kubeconfig and ssh + config + name: ntnx_karbon_clusters_info + namespace: "" + - description: Nutanix module for karbon private registry + name: ntnx_karbon_registries + namespace: "" + - description: Nutanix info module for karbon private registry + name: ntnx_karbon_registries_info + namespace: "" + release_date: "2022-09-09" 1.7.0: changes: bugfixes: - - ntnx_projects - [Bug] Clusters and subnets configured in project are not visible - in new projects UI [\#283](https://github.com/nutanix/nutanix.ansible/issues/283) - - ntnx_vms - Subnet Name --> UUID Lookup should be PE Cluster Aware 
[\#260](https://github.com/nutanix/nutanix.ansible/issues/260) - - nutanix.ncp.ntnx_prism_vm_inventory - [Bug] Inventory does not fetch more - than 500 Entities [[\#228](https://github.com/nutanix/nutanix.ansible/issues/228)] + - ntnx_projects - [Bug] Clusters and subnets configured in project are not visible + in new projects UI [\#283](https://github.com/nutanix/nutanix.ansible/issues/283) + - ntnx_vms - Subnet Name --> UUID Lookup should be PE Cluster Aware [\#260](https://github.com/nutanix/nutanix.ansible/issues/260) + - nutanix.ncp.ntnx_prism_vm_inventory - [Bug] Inventory does not fetch more + than 500 Entities [[\#228](https://github.com/nutanix/nutanix.ansible/issues/228)] minor_changes: - - examples - [Imprv] Add version related notes to examples [\#279](https://github.com/nutanix/nutanix.ansible/issues/279) - - examples - [Imprv] Fix IaaS example [\#250](https://github.com/nutanix/nutanix.ansible/issues/250) - - examples - [Imprv] add examples of Images and Static Routes Module [\#256](https://github.com/nutanix/nutanix.ansible/issues/256) - - ntnx_projects - [Feat] Add capability to configure role mappings with collaboration - on/off in ntnx_projects [\#252](https://github.com/nutanix/nutanix.ansible/issues/252) - - ntnx_projects - [Imprv] add vpcs and overlay subnets configure capability - to module ntnx_projects [\#289](https://github.com/nutanix/nutanix.ansible/issues/289) - - ntnx_vms - [Imprv] add functionality to set network mac_address to module - ntnx_vms [\#201](https://github.com/nutanix/nutanix.ansible/issues/201) - - nutanix.ncp.ntnx_prism_vm_inventory - [Imprv] add functionality constructed - to module inventory [\#235](https://github.com/nutanix/nutanix.ansible/issues/235) - release_date: '2022-09-30' + - examples - [Imprv] Add version related notes to examples [\#279](https://github.com/nutanix/nutanix.ansible/issues/279) + - examples - [Imprv] Fix IaaS example [\#250](https://github.com/nutanix/nutanix.ansible/issues/250) + - examples - 
[Imprv] add examples of Images and Static Routes Module [\#256](https://github.com/nutanix/nutanix.ansible/issues/256) + - ntnx_projects - [Feat] Add capability to configure role mappings with collaboration + on/off in ntnx_projects [\#252](https://github.com/nutanix/nutanix.ansible/issues/252) + - ntnx_projects - [Imprv] add vpcs and overlay subnets configure capability + to module ntnx_projects [\#289](https://github.com/nutanix/nutanix.ansible/issues/289) + - ntnx_vms - [Imprv] add functionality to set network mac_address to module + ntnx_vms [\#201](https://github.com/nutanix/nutanix.ansible/issues/201) + - nutanix.ncp.ntnx_prism_vm_inventory - [Imprv] add functionality constructed + to module inventory [\#235](https://github.com/nutanix/nutanix.ansible/issues/235) + release_date: "2022-09-30" 1.8.0: modules: - - description: module for authorizing db server vm - name: ntnx_ndb_authorize_db_server_vms - namespace: '' - - description: Create, Update and Delete NDB clusters - name: ntnx_ndb_clusters - namespace: '' - - description: module for database clone refresh. 
- name: ntnx_ndb_database_clone_refresh - namespace: '' - - description: module for create, update and delete of ndb database clones - name: ntnx_ndb_database_clones - namespace: '' - - description: module for performing log catchups action - name: ntnx_ndb_database_log_catchup - namespace: '' - - description: module for restoring database instance - name: ntnx_ndb_database_restore - namespace: '' - - description: module for scaling database instance - name: ntnx_ndb_database_scale - namespace: '' - - description: module for creating, updating and deleting database snapshots - name: ntnx_ndb_database_snapshots - namespace: '' - - description: module for create, delete and update of database server vms - name: ntnx_ndb_db_server_vms - namespace: '' - - description: module to manage linked databases of a database instance - name: ntnx_ndb_linked_databases - namespace: '' - - description: module to add and remove maintenance related tasks - name: ntnx_ndb_maintenance_tasks - namespace: '' - - description: module to create, update and delete mainetance window - name: ntnx_ndb_maintenance_window - namespace: '' - - description: module for fetching maintenance windows info - name: ntnx_ndb_maintenance_windows_info - namespace: '' - - description: module for create, update and delete of profiles - name: ntnx_ndb_profiles - namespace: '' - - description: module for database instance registration - name: ntnx_ndb_register_database - namespace: '' - - description: module for registration of database server vm - name: ntnx_ndb_register_db_server_vm - namespace: '' - - description: module for replicating database snapshots across clusters of time - machine - name: ntnx_ndb_replicate_database_snapshots - namespace: '' - - description: moudle for creating, updating and deleting slas - name: ntnx_ndb_slas - namespace: '' - - description: info module for ndb snapshots info - name: ntnx_ndb_snapshots_info - namespace: '' - - description: Module for create, update and delete of 
stretched vlan. - name: ntnx_ndb_stretched_vlans - namespace: '' - - description: module for create, update and delete of tags - name: ntnx_ndb_tags - namespace: '' - - description: Module for create, update and delete for data access management - in time machines. - name: ntnx_ndb_time_machine_clusters - namespace: '' - - description: Module for create, update and delete of ndb vlan. - name: ntnx_ndb_vlans - namespace: '' - - description: info module for ndb vlans - name: ntnx_ndb_vlans_info - namespace: '' - release_date: '2023-02-28' + - description: module for authorizing db server vm + name: ntnx_ndb_authorize_db_server_vms + namespace: "" + - description: Create, Update and Delete NDB clusters + name: ntnx_ndb_clusters + namespace: "" + - description: module for database clone refresh. + name: ntnx_ndb_database_clone_refresh + namespace: "" + - description: module for create, update and delete of ndb database clones + name: ntnx_ndb_database_clones + namespace: "" + - description: module for performing log catchups action + name: ntnx_ndb_database_log_catchup + namespace: "" + - description: module for restoring database instance + name: ntnx_ndb_database_restore + namespace: "" + - description: module for scaling database instance + name: ntnx_ndb_database_scale + namespace: "" + - description: module for creating, updating and deleting database snapshots + name: ntnx_ndb_database_snapshots + namespace: "" + - description: module for create, delete and update of database server vms + name: ntnx_ndb_db_server_vms + namespace: "" + - description: module to manage linked databases of a database instance + name: ntnx_ndb_linked_databases + namespace: "" + - description: module to add and remove maintenance related tasks + name: ntnx_ndb_maintenance_tasks + namespace: "" + - description: module to create, update and delete maintenance window + name: ntnx_ndb_maintenance_window + namespace: "" + - description: module for fetching maintenance windows info + name: 
ntnx_ndb_maintenance_windows_info + namespace: "" + - description: module for create, update and delete of profiles + name: ntnx_ndb_profiles + namespace: "" + - description: module for database instance registration + name: ntnx_ndb_register_database + namespace: "" + - description: module for registration of database server vm + name: ntnx_ndb_register_db_server_vm + namespace: "" + - description: + module for replicating database snapshots across clusters of time + machine + name: ntnx_ndb_replicate_database_snapshots + namespace: "" + - description: module for creating, updating and deleting slas + name: ntnx_ndb_slas + namespace: "" + - description: info module for ndb snapshots info + name: ntnx_ndb_snapshots_info + namespace: "" + - description: Module for create, update and delete of stretched vlan. + name: ntnx_ndb_stretched_vlans + namespace: "" + - description: module for create, update and delete of tags + name: ntnx_ndb_tags + namespace: "" + - description: + Module for create, update and delete for data access management + in time machines. + name: ntnx_ndb_time_machine_clusters + namespace: "" + - description: Module for create, update and delete of ndb vlan. + name: ntnx_ndb_vlans + namespace: "" + - description: info module for ndb vlans + name: ntnx_ndb_vlans_info + namespace: "" + release_date: "2023-02-28" 1.8.0-beta.1: modules: - - description: info module for database clones - name: ntnx_ndb_clones_info - namespace: '' - - description: info module for ndb clusters info - name: ntnx_ndb_clusters_info - namespace: '' - - description: Module for create, update and delete of single instance database. - Currently, postgres type database is officially supported. 
- name: ntnx_ndb_databases - namespace: '' - - description: info module for ndb database instances - name: ntnx_ndb_databases_info - namespace: '' - - description: info module for ndb db server vms info - name: ntnx_ndb_db_servers_info - namespace: '' - - description: info module for ndb profiles - name: ntnx_ndb_profiles_info - namespace: '' - - description: info module for ndb slas - name: ntnx_ndb_slas_info - namespace: '' - - description: info module for ndb time machines - name: ntnx_ndb_time_machines_info - namespace: '' - release_date: '2022-10-20' + - description: info module for database clones + name: ntnx_ndb_clones_info + namespace: "" + - description: info module for ndb clusters info + name: ntnx_ndb_clusters_info + namespace: "" + - description: + Module for create, update and delete of single instance database. + Currently, postgres type database is officially supported. + name: ntnx_ndb_databases + namespace: "" + - description: info module for ndb database instances + name: ntnx_ndb_databases_info + namespace: "" + - description: info module for ndb db server vms info + name: ntnx_ndb_db_servers_info + namespace: "" + - description: info module for ndb profiles + name: ntnx_ndb_profiles_info + namespace: "" + - description: info module for ndb slas + name: ntnx_ndb_slas_info + namespace: "" + - description: info module for ndb time machines + name: ntnx_ndb_time_machines_info + namespace: "" + release_date: "2022-10-20" 1.9.0: changes: bugfixes: - - info modules - [Bug] Multiple filters params are not considered for fetching - entities in PC based info modules [[\#352](https://github.com/nutanix/nutanix.ansible/issues/352)] - - ntnx_foundation - [Bug] clusters parameters not being passed to Foundation - Server in module nutanix.ncp.ntnx_foundation [[\#307](https://github.com/nutanix/nutanix.ansible/issues/307)] - - ntnx_karbon_clusters - [Bug] error in sample karbon/create_k8s_cluster.yml - 
[[\#349](https://github.com/nutanix/nutanix.ansible/issues/349)] - - ntnx_karbon_clusters - [Bug] impossible to deploy NKE cluster with etcd using - disk smaller than 120GB [[\#350](https://github.com/nutanix/nutanix.ansible/issues/350)] - - ntnx_subnets - [Bug] wrong virtual_switch selected in module ntnx_subnets - [\#328](https://github.com/nutanix/nutanix.ansible/issues/328) + - info modules - [Bug] Multiple filters params are not considered for fetching + entities in PC based info modules [[\#352](https://github.com/nutanix/nutanix.ansible/issues/352)] + - ntnx_foundation - [Bug] clusters parameters not being passed to Foundation + Server in module nutanix.ncp.ntnx_foundation [[\#307](https://github.com/nutanix/nutanix.ansible/issues/307)] + - ntnx_karbon_clusters - [Bug] error in sample karbon/create_k8s_cluster.yml + [[\#349](https://github.com/nutanix/nutanix.ansible/issues/349)] + - ntnx_karbon_clusters - [Bug] impossible to deploy NKE cluster with etcd using + disk smaller than 120GB [[\#350](https://github.com/nutanix/nutanix.ansible/issues/350)] + - ntnx_subnets - [Bug] wrong virtual_switch selected in module ntnx_subnets + [\#328](https://github.com/nutanix/nutanix.ansible/issues/328) deprecated_features: - - ntnx_security_rules - The ``apptier`` option in target group has been removed. - New option called ``apptiers`` has been added to support multi tier policy. + - ntnx_security_rules - The ``apptier`` option in target group has been removed. + New option called ``apptiers`` has been added to support multi tier policy. 
minor_changes: - - ntnx_profiles_info - [Impr] Develop ansible module for getting available IPs - for given network profiles in NDB [\#345](https://github.com/nutanix/nutanix.ansible/issues/345) - - ntnx_security_rules - [Imprv] Flow Network Security Multi-Tier support in - Security Policy definition [\#319](https://github.com/nutanix/nutanix.ansible/issues/319) + - ntnx_profiles_info - [Impr] Develop ansible module for getting available IPs + for given network profiles in NDB [\#345](https://github.com/nutanix/nutanix.ansible/issues/345) + - ntnx_security_rules - [Imprv] Flow Network Security Multi-Tier support in + Security Policy definition [\#319](https://github.com/nutanix/nutanix.ansible/issues/319) modules: - - description: Create,Update and Delete a worker node pools with the provided - configuration. - name: ntnx_karbon_clusters_node_pools - namespace: '' - - description: info module for ndb tags info - name: ntnx_ndb_tags_info - namespace: '' - release_date: '2023-07-11' + - description: + Create,Update and Delete a worker node pools with the provided + configuration. 
+ name: ntnx_karbon_clusters_node_pools + namespace: "" + - description: info module for ndb tags info + name: ntnx_ndb_tags_info + namespace: "" + release_date: "2023-07-11" 1.9.1: changes: bugfixes: - - ntnx_foundation - [Bug] Error when Clusters Block is missing in module ntnx_foundation - [[\#397](https://github.com/nutanix/nutanix.ansible/issues/397)] - - ntnx_ndb_time_machines_info - [Bug] ntnx_ndb_time_machines_info not fetching - all attributes when name is used for fetching [[\#418](https://github.com/nutanix/nutanix.ansible/issues/418)] - - ntnx_security_rules - Fix Syntax Errors in Create App Security Rule Example - [[\#394](https://github.com/nutanix/nutanix.ansible/pull/394/files)] - - ntnx_vms - [Bug] Error when updating size_gb using the int filter in module - ntnx_vms [[\#400](https://github.com/nutanix/nutanix.ansible/issues/400)] - - ntnx_vms - [Bug] hard_poweroff has been moved to state from operation [[\#415](https://github.com/nutanix/nutanix.ansible/issues/415)] - - ntnx_vms_clone - [Bug] cannot change boot_config when cloning in module ntnx_vms_clone - [[\#360](https://github.com/nutanix/nutanix.ansible/issues/359)] - - website - [Bug] Github page deployment action is failing. 
[[\#483](https://github.com/nutanix/nutanix.ansible/issues/483)] + - ntnx_foundation - [Bug] Error when Clusters Block is missing in module ntnx_foundation + [[\#397](https://github.com/nutanix/nutanix.ansible/issues/397)] + - ntnx_ndb_time_machines_info - [Bug] ntnx_ndb_time_machines_info not fetching + all attributes when name is used for fetching [[\#418](https://github.com/nutanix/nutanix.ansible/issues/418)] + - ntnx_security_rules - Fix Syntax Errors in Create App Security Rule Example + [[\#394](https://github.com/nutanix/nutanix.ansible/pull/394/files)] + - ntnx_vms - [Bug] Error when updating size_gb using the int filter in module + ntnx_vms [[\#400](https://github.com/nutanix/nutanix.ansible/issues/400)] + - ntnx_vms - [Bug] hard_poweroff has been moved to state from operation [[\#415](https://github.com/nutanix/nutanix.ansible/issues/415)] + - ntnx_vms_clone - [Bug] cannot change boot_config when cloning in module ntnx_vms_clone + [[\#360](https://github.com/nutanix/nutanix.ansible/issues/359)] + - website - [Bug] Github page deployment action is failing. 
[[\#483](https://github.com/nutanix/nutanix.ansible/issues/483)] minor_changes: - - docs - [Imprv] add doc regarding running integration tests locally [[\#435](https://github.com/nutanix/nutanix.ansible/issues/435)] - - info modules - [Imprv] add examples for custom_filter [[\#416](https://github.com/nutanix/nutanix.ansible/issues/416)] - - ndb clones - [Imprv] Enable database clones and clone refresh using latest - snapshot flag [[\#391](https://github.com/nutanix/nutanix.ansible/issues/391)] - - ndb clones - [Imprv] add examples for NDB database clone under examples folder - [[\#386](https://github.com/nutanix/nutanix.ansible/issues/386)] - - ntnx_prism_vm_inventory - Add support for PC Categories [[\#405](https://github.com/nutanix/nutanix.ansible/issues/405)] - - ntnx_prism_vm_inventory - [Imprv] add examples for dynamic inventory using - ntnx_prism_vm_inventory [[\#401](https://github.com/nutanix/nutanix.ansible/issues/401)] - - ntnx_vms - [Imprv] add possibility to specify / modify vm user ownership and - project [[\#378](https://github.com/nutanix/nutanix.ansible/issues/378)] - - ntnx_vms - owner association upon vm creation module [[\#359](https://github.com/nutanix/nutanix.ansible/issues/359)] - - ntnx_vms_info - [Imprv] add examples with guest customization for module ntnx_vms - [[\#395](https://github.com/nutanix/nutanix.ansible/issues/395)] + - docs - [Imprv] add doc regarding running integration tests locally [[\#435](https://github.com/nutanix/nutanix.ansible/issues/435)] + - info modules - [Imprv] add examples for custom_filter [[\#416](https://github.com/nutanix/nutanix.ansible/issues/416)] + - ndb clones - [Imprv] Enable database clones and clone refresh using latest + snapshot flag [[\#391](https://github.com/nutanix/nutanix.ansible/issues/391)] + - ndb clones - [Imprv] add examples for NDB database clone under examples folder + [[\#386](https://github.com/nutanix/nutanix.ansible/issues/386)] + - ntnx_prism_vm_inventory - Add support for PC 
Categories [[\#405](https://github.com/nutanix/nutanix.ansible/issues/405)] + - ntnx_prism_vm_inventory - [Imprv] add examples for dynamic inventory using + ntnx_prism_vm_inventory [[\#401](https://github.com/nutanix/nutanix.ansible/issues/401)] + - ntnx_vms - [Imprv] add possibility to specify / modify vm user ownership and + project [[\#378](https://github.com/nutanix/nutanix.ansible/issues/378)] + - ntnx_vms - owner association upon vm creation module [[\#359](https://github.com/nutanix/nutanix.ansible/issues/359)] + - ntnx_vms_info - [Imprv] add examples with guest customization for module ntnx_vms + [[\#395](https://github.com/nutanix/nutanix.ansible/issues/395)] release_summary: This release included bug fixes and improvement. - release_date: '2023-10-09' + release_date: "2023-10-09" 1.9.2: changes: breaking_changes: - - nutanix.ncp collection - Due to all versions of ansible-core version less - than v2.15.0 are EOL, we are also deprecating support for same and minimum - version to use this collection is ansible-core==2.15.0. [[\#479](https://github.com/nutanix/nutanix.ansible/issues/479)] + - nutanix.ncp collection - Due to all versions of ansible-core version less + than v2.15.0 are EOL, we are also deprecating support for same and minimum + version to use this collection is ansible-core==2.15.0. 
[[\#479](https://github.com/nutanix/nutanix.ansible/issues/479)] release_summary: Deprecating support for ansible-core less than v2.15.0 - release_date: '2024-05-30' + release_date: "2024-05-30" diff --git a/examples/acp.yml b/examples/acp.yml index 8efb39915..297bc00df 100644 --- a/examples/acp.yml +++ b/examples/acp.yml @@ -12,15 +12,14 @@ validate_certs: false tasks: - - - name: Create ACP with all specfactions + - name: Create ACP with all specifications ntnx_acps: validate_certs: False state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - name: acp_with_all_specfactions + name: acp_with_all_specifications role: uuid: "{{ role.uuid }}" user_uuids: diff --git a/examples/fc/fc.yml b/examples/fc/fc.yml index 00f9732fb..1053195d8 100644 --- a/examples/fc/fc.yml +++ b/examples/fc/fc.yml @@ -83,7 +83,7 @@ - discovery_mode: node_serial: "" - discovery_mode: - node_serial: "" + node_serial: "" - discovery_mode: node_serial: "" discovery_override: diff --git a/examples/foundation/node_discovery_network_info.yml b/examples/foundation/node_discovery_network_info.yml index 2f81eb083..27a3a550d 100644 --- a/examples/foundation/node_discovery_network_info.yml +++ b/examples/foundation/node_discovery_network_info.yml @@ -5,21 +5,21 @@ collections: - nutanix.ncp tasks: - - name: Discover all nodes - ntnx_foundation_discover_nodes_info: - nutanix_host: "10.xx.xx.xx" - # unskip line 12 to include configured(nodes part of cluster) nodes in the output - # include_configured: true - register: discovered_nodes + - name: Discover all nodes + ntnx_foundation_discover_nodes_info: + nutanix_host: "10.xx.xx.xx" + # resume line 12 to include configured(nodes part of cluster) nodes in the output + # include_configured: true + register: discovered_nodes - # get network info of nodes discovered from ntnx_foundation_discover_nodes_info module - - name: Get node network info of some discovered nodes - ntnx_foundation_node_network_info: 
- nutanix_host: "10.xx.xx.xx" - nodes: - - "{{discovered_nodes.blocks.0.nodes.0.ipv6_address}}" - - "{{discovered_nodes.blocks.1.nodes.0.ipv6_address}}" - register: result + # get network info of nodes discovered from ntnx_foundation_discover_nodes_info module + - name: Get node network info of some discovered nodes + ntnx_foundation_node_network_info: + nutanix_host: "10.xx.xx.xx" + nodes: + - "{{discovered_nodes.blocks.0.nodes.0.ipv6_address}}" + - "{{discovered_nodes.blocks.1.nodes.0.ipv6_address}}" + register: result - - debug: - msg: "{{ result }}" + - debug: + msg: "{{ result }}" diff --git a/examples/images.yml b/examples/images.yml index 82f11491d..5d2db66d6 100644 --- a/examples/images.yml +++ b/examples/images.yml @@ -69,7 +69,7 @@ - Backup wait: true - - name: dettach all categories from existing image + - name: detach all categories from existing image ntnx_images: state: "present" image_uuid: "00000000-0000-0000-0000-000000000000" diff --git a/examples/karbon/create_registries.yml b/examples/karbon/create_registries.yml index 42c75e310..d843ace0b 100644 --- a/examples/karbon/create_registries.yml +++ b/examples/karbon/create_registries.yml @@ -1,5 +1,5 @@ --- -- name: create registeries +- name: create registries hosts: localhost gather_facts: false collections: diff --git a/examples/karbon/registries_info.yml b/examples/karbon/registries_info.yml index 81c2d8742..526cabd64 100644 --- a/examples/karbon/registries_info.yml +++ b/examples/karbon/registries_info.yml @@ -1,5 +1,5 @@ --- -- name: get registeries info +- name: get registries info hosts: localhost gather_facts: false collections: diff --git a/examples/ndb/README.md b/examples/ndb/README.md index 761d0ec59..52491bbc3 100644 --- a/examples/ndb/README.md +++ b/examples/ndb/README.md @@ -1,5 +1,5 @@ # Nutanix Database Service -Nutanix ansibe collection nutanix.ncp from v1.8.0 will contain modules for supporting Nutanix Database Service (NDB) features. 
+Nutanix ansible collection nutanix.ncp from v1.8.0 will contain modules for supporting Nutanix Database Service (NDB) features. These modules are based on workflow : diff --git a/examples/ndb/db_server_vms.yml b/examples/ndb/db_server_vms.yml index 7ae35cc47..131b49457 100644 --- a/examples/ndb/db_server_vms.yml +++ b/examples/ndb/db_server_vms.yml @@ -12,7 +12,6 @@ validate_certs: false tasks: - - name: create spec for db server vm using time machine check_mode: yes ntnx_ndb_db_server_vms: @@ -103,7 +102,6 @@ post_task_cmd: "ls -F" register: result - - name: update db server vm name, desc, credentials, tags ntnx_ndb_db_server_vms: wait: True @@ -129,12 +127,10 @@ password: "pass" register: result - - name: List NDB db_servers ntnx_ndb_db_servers_info: register: db_servers - - name: get NDB db_servers using it's name ntnx_ndb_db_servers_info: filters: @@ -175,8 +171,8 @@ - name: "{{vm1_name_updated}}" - uuid: "test_vm_1" db_server_clusters: - - uuid: "test_cluter_1" - - uuid: "test_cluter_2" + - uuid: "test_cluster_1" + - uuid: "test_cluster_2" maintenance_window: name: "{{maintenance.window_name}}" tasks: @@ -198,7 +194,6 @@ tasks: [] register: result - - name: remove maintenance tasks ntnx_ndb_maintenance_tasks: db_server_vms: @@ -208,7 +203,7 @@ tasks: [] register: result - - name: Add maitenance window task for vm + - name: Add maintenance window task for vm ntnx_ndb_maintenance_tasks: db_server_vms: - name: "{{vm1_name_updated}}" @@ -233,7 +228,7 @@ uuid: "{{db_server_uuid}}" register: result - - name: genereate check mode spec for delete vm with vgs and snapshots + - name: generate check mode spec for delete vm with vgs and snapshots check_mode: yes ntnx_ndb_db_server_vms: state: "absent" @@ -256,8 +251,7 @@ ################################### DB server VM Registration tasks ############################# - - - name: generate spec for registeration of the previous unregistered vm using check mode + - name: generate spec for registration of the previous 
unregistered vm using check mode check_mode: yes ntnx_ndb_register_db_server_vm: ip: "{{vm_ip}}" @@ -308,7 +302,6 @@ ################################### DB server VM Delete tasks ############################# - - name: unregister db server vm ntnx_ndb_db_server_vms: state: "absent" diff --git a/examples/ndb/provision_postgres_ha_instance_with_ips.yml b/examples/ndb/provision_postgres_ha_instance_with_ips.yml index 00e95fc68..eb5e2b5c6 100644 --- a/examples/ndb/provision_postgres_ha_instance_with_ips.yml +++ b/examples/ndb/provision_postgres_ha_instance_with_ips.yml @@ -1,6 +1,6 @@ --- -# Here we will be deploying high availibility postgres database with static IPs assigned -# to vms and virtul IP for HA proxy +# Here we will be deploying high availability postgres database with static IPs assigned +# to vms and virtual IP for HA proxy - name: Create stretched vlan hosts: localhost gather_facts: false diff --git a/examples/roles_crud.yml b/examples/roles_crud.yml index b01c02eca..b443afccc 100644 --- a/examples/roles_crud.yml +++ b/examples/roles_crud.yml @@ -14,7 +14,7 @@ ntnx_permissions_info: register: permissions - - name: Create a role with 2 permissions. Here we will be using name or uuid for referenceing permissions + - name: Create a role with 2 permissions. 
Here we will be using name or uuid for referencing permissions ntnx_roles: state: present name: test-ansible-role-1 diff --git a/examples/vm.yml b/examples/vm.yml index f88ab7064..079409f61 100644 --- a/examples/vm.yml +++ b/examples/vm.yml @@ -11,8 +11,8 @@ nutanix_password: validate_certs: false tasks: - - name: Setting Variables - set_fact: + - name: Setting Variables + set_fact: cluster_name: "" script_path: "" subnet_name: "" @@ -20,55 +20,55 @@ password: "" fqdn: "" - - name: Create Cloud-init Script file - copy: - dest: "cloud_init.yml" - content: | - #cloud-config - chpasswd: - list: | - root: "{{ password }}" - expire: False - fqdn: "{{ fqdn }}" + - name: Create Cloud-init Script file + copy: + dest: "cloud_init.yml" + content: | + #cloud-config + chpasswd: + list: | + root: "{{ password }}" + expire: False + fqdn: "{{ fqdn }}" - - name: create Vm - ntnx_vms: - state: present - name: "ansible_automation_demo" - desc: "ansible_vm_description" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{cluster_name}}" - networks: - - is_connected: True - subnet: - name: "{{ subnet_name }}" - # mention cluster only when there are multiple subnets with same name accross clusters - # and subnet name is set above - cluster: - name: "{{cluster_name}}" - disks: - - type: "DISK" - size_gb: 30 - bus: "SATA" - clone_image: - name: "{{ image_name }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: output + - name: create Vm + ntnx_vms: + state: present + name: "ansible_automation_demo" + desc: "ansible_vm_description" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{cluster_name}}" + networks: + - is_connected: True + subnet: + name: "{{ subnet_name }}" + # mention cluster only when there are multiple subnets with same name across clusters + # and subnet name is set above + cluster: + name: "{{cluster_name}}" + disks: + - type: "DISK" + 
size_gb: 30 + bus: "SATA" + clone_image: + name: "{{ image_name }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: True + register: output - - name: output of vm created - debug: - msg: '{{ output }}' + - name: output of vm created + debug: + msg: "{{ output }}" - - name: delete VM - ntnx_vms: - state: absent - vm_uuid: "{{output.vm_uuid}}" + - name: delete VM + ntnx_vms: + state: absent + vm_uuid: "{{output.vm_uuid}}" diff --git a/examples/vm_update.yml b/examples/vm_update.yml index 45de3d642..b73f19648 100644 --- a/examples/vm_update.yml +++ b/examples/vm_update.yml @@ -61,7 +61,7 @@ - name: Update VM by deleting and editing disks and subnets ntnx_vms: vm_uuid: "{{ vm_uuid }}" - name: update diks + name: update disks desc: update disks disks: - type: "DISK" diff --git a/plugins/doc_fragments/ntnx_operations.py b/plugins/doc_fragments/ntnx_operations.py index 50ecb80a0..215484b4f 100644 --- a/plugins/doc_fragments/ntnx_operations.py +++ b/plugins/doc_fragments/ntnx_operations.py @@ -10,13 +10,13 @@ class ModuleDocFragment(object): - # Plugin options for ntnx CRUD opperations + # Plugin options for ntnx CRUD operations DOCUMENTATION = r""" options: state: description: - Specify state - - If C(state) is set to C(present) then the opperation will be create the item + - If C(state) is set to C(present) then the operation will be create the item - >- If C(state) is set to C(absent) and if the item exists, then item is removed. 
diff --git a/plugins/module_utils/ndb/database_engines/database_engine.py b/plugins/module_utils/ndb/database_engines/database_engine.py index aade70764..6ebb16f96 100644 --- a/plugins/module_utils/ndb/database_engines/database_engine.py +++ b/plugins/module_utils/ndb/database_engines/database_engine.py @@ -29,7 +29,7 @@ def build_spec_db_instance_register_action_arguments(self, payload, config): def build_spec_db_server_vm_register_action_arguments(self, payload, config): """ - Implement this method to add database engine specific properties for registeration database server vm + Implement this method to add database engine specific properties for registration database server vm """ return payload, None diff --git a/plugins/module_utils/prism/static_routes.py b/plugins/module_utils/prism/static_routes.py index 5627e943b..86304b963 100644 --- a/plugins/module_utils/prism/static_routes.py +++ b/plugins/module_utils/prism/static_routes.py @@ -51,7 +51,7 @@ def _build_default_route_spec(self, payload, next_hop): return payload, None def _build_spec_static_routes(self, payload, inp_static_routes): - # since static route list has to be overriden + # since static route list has to be overridden if payload["spec"]["resources"].get("default_route_nexthop"): payload["spec"]["resources"].pop("default_route_nexthop") static_routes_list = [] diff --git a/plugins/modules/ntnx_acps.py b/plugins/modules/ntnx_acps.py index afd57b89d..ddc22aee9 100644 --- a/plugins/modules/ntnx_acps.py +++ b/plugins/modules/ntnx_acps.py @@ -157,14 +157,14 @@ user_group_uuids: - "{{ user_group_uuid }}" -- name: Create ACP with all specfactions +- name: Create ACP with all specifications ntnx_acps: validate_certs: False state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - name: acp_with_all_specfactions + name: acp_with_all_specifications role: uuid: "{{ role.uuid }}" user_uuids: diff --git a/plugins/modules/ntnx_ndb_clusters.py 
b/plugins/modules/ntnx_ndb_clusters.py index 24a3b6a4e..964906722 100644 --- a/plugins/modules/ntnx_ndb_clusters.py +++ b/plugins/modules/ntnx_ndb_clusters.py @@ -141,7 +141,7 @@ """ EXAMPLES = r""" - - name: Register Cluster with prisim_vlan + - name: Register Cluster with prism_vlan ntnx_ndb_clusters: nutanix_host: "" nutanix_username: "" diff --git a/plugins/modules/ntnx_ndb_maintenance_tasks.py b/plugins/modules/ntnx_ndb_maintenance_tasks.py index 1b6411f0a..b157f29ed 100644 --- a/plugins/modules/ntnx_ndb_maintenance_tasks.py +++ b/plugins/modules/ntnx_ndb_maintenance_tasks.py @@ -101,7 +101,7 @@ tasks: [] register: result -- name: Add maitenance window task for vm +- name: Add maintenance window task for vm ntnx_ndb_maintenance_tasks: db_server_vms: - name: "{{vm1_name_updated}}" @@ -125,7 +125,7 @@ "accessLevel": null, "dateCreated": "2023-02-25 06:34:44", "dateModified": "2023-02-28 00:00:00", - "description": "anisble-created-window", + "description": "ansible-created-window", "entityTaskAssoc": [ { "accessLevel": null, diff --git a/plugins/modules/ntnx_ndb_maintenance_window.py b/plugins/modules/ntnx_ndb_maintenance_window.py index b8981627c..87a26080c 100644 --- a/plugins/modules/ntnx_ndb_maintenance_window.py +++ b/plugins/modules/ntnx_ndb_maintenance_window.py @@ -9,9 +9,9 @@ DOCUMENTATION = r""" --- module: ntnx_ndb_maintenance_window -short_description: module to create, update and delete mainetance window +short_description: module to create, update and delete maintenance window version_added: 1.8.0 -description: module to create, update and delete mainetance window +description: module to create, update and delete maintenance window options: name: description: @@ -51,11 +51,11 @@ type: str week_of_month: description: - - week of month for maitenance + - week of month for maintenance type: str day_of_week: description: - - day of week for maitenance + - day of week for maintenance type: str extends_documentation_fragment: @@ -71,7 +71,7 @@ - name: 
create window with weekly schedule ntnx_ndb_maintenance_window: name: "{{window1_name}}" - desc: "anisble-created-window" + desc: "ansible-created-window" schedule: recurrence: "weekly" duration: 2 @@ -83,7 +83,7 @@ - name: create window with monthly schedule ntnx_ndb_maintenance_window: name: "{{window2_name}}" - desc: "anisble-created-window" + desc: "ansible-created-window" schedule: recurrence: "monthly" duration: 2 diff --git a/plugins/modules/ntnx_ndb_maintenance_windows_info.py b/plugins/modules/ntnx_ndb_maintenance_windows_info.py index b2d0c6b61..b7ce07a34 100644 --- a/plugins/modules/ntnx_ndb_maintenance_windows_info.py +++ b/plugins/modules/ntnx_ndb_maintenance_windows_info.py @@ -49,7 +49,7 @@ "accessLevel": null, "dateCreated": "2023-02-25 06:34:44", "dateModified": "2023-02-28 00:00:00", - "description": "anisble-created-window", + "description": "ansible-created-window", "entityTaskAssoc": [ { "accessLevel": null, diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index a617e0433..873e3bee2 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -929,7 +929,7 @@ def check_profile_idempotency(old_spec, new_spec): if len(new_clusters) != len(old_clusters): return False - # update if availibility of cluster is required + # update if availability of cluster is required for cluster in new_clusters: if cluster not in old_clusters: return False diff --git a/plugins/modules/ntnx_ndb_register_database.py b/plugins/modules/ntnx_ndb_register_database.py index 503e23bfb..0cfec9798 100644 --- a/plugins/modules/ntnx_ndb_register_database.py +++ b/plugins/modules/ntnx_ndb_register_database.py @@ -280,7 +280,7 @@ """ EXAMPLES = r""" -- name: regsiter database from registered vm +- name: register database from registered vm ntnx_ndb_register_database: wait: true diff --git a/plugins/modules/ntnx_protection_rules.py b/plugins/modules/ntnx_protection_rules.py index 01c8b9aff..2f36ae8c6 100644 
--- a/plugins/modules/ntnx_protection_rules.py +++ b/plugins/modules/ntnx_protection_rules.py @@ -655,7 +655,7 @@ def check_rule_idempotency(rule_spec, update_spec): ].get("category_filter"): return False - # check if availibility zones have updated + # check if availability zones have updated if len(rule_spec["spec"]["resources"]["ordered_availability_zone_list"]) != len( update_spec["spec"]["resources"]["ordered_availability_zone_list"] ): diff --git a/plugins/modules/ntnx_recovery_plans.py b/plugins/modules/ntnx_recovery_plans.py index fc39156f5..6ef6678e3 100644 --- a/plugins/modules/ntnx_recovery_plans.py +++ b/plugins/modules/ntnx_recovery_plans.py @@ -1045,7 +1045,7 @@ def check_recovery_plan_idempotency(old_spec, update_spec): if config not in old_ip_assignments: return False - # comparing availibility zones + # comparing availability zones if ( old_spec["spec"]["resources"]["parameters"]["availability_zone_list"] != update_spec["spec"]["resources"]["parameters"]["availability_zone_list"] diff --git a/plugins/modules/ntnx_service_groups.py b/plugins/modules/ntnx_service_groups.py index 8882494dd..e932e9da5 100644 --- a/plugins/modules/ntnx_service_groups.py +++ b/plugins/modules/ntnx_service_groups.py @@ -84,7 +84,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: False - name: app_srvive_group + name: app_service_group desc: desc service_details: tcp: @@ -102,7 +102,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: False - name: icmp_srvive_group + name: icmp_service_group desc: desc service_details: icmp: diff --git a/plugins/modules/ntnx_static_routes.py b/plugins/modules/ntnx_static_routes.py index edc2e0504..d77e3c2da 100644 --- a/plugins/modules/ntnx_static_routes.py +++ b/plugins/modules/ntnx_static_routes.py @@ -32,7 +32,7 @@ default: false static_routes: description: - - list of static routes to be overriden in vpc. + - list of static routes to be overridden in vpc. 
- mutually exclusive with C(remove_all_routes) - required incase remove_all_categories is not given - default static route can be mentioned in this with destination - 0.0.0.0/0 diff --git a/plugins/modules/ntnx_vms.py b/plugins/modules/ntnx_vms.py index 65c7daf88..604135925 100644 --- a/plugins/modules/ntnx_vms.py +++ b/plugins/modules/ntnx_vms.py @@ -17,14 +17,14 @@ state: description: - Specify state - - If C(state) is set to C(present) then the opperation will be create the item + - If C(state) is set to C(present) then the operation will be create the item - >- If C(state) is set to C(absent) and if the item exists, then item is removed. - - If C(state) is set to C(power_on) then the opperation will be power on the VM - - If C(state) is set to C(power_off) then the opperation will be power off the VM - - If C(state) is set to C(soft_shutdown) then the opperation will be soft shutdown the VM - - If C(state) is set to C(hard_poweroff) then the opperation will be hard poweroff the VM + - If C(state) is set to C(power_on) then the operation will be power on the VM + - If C(state) is set to C(power_off) then the operation will be power off the VM + - If C(state) is set to C(soft_shutdown) then the operation will be soft shutdown the VM + - If C(state) is set to C(hard_poweroff) then the operation will be hard poweroff the VM choices: - present - absent @@ -199,7 +199,7 @@ empty_cdrom: True cores_per_vcpu: 1 - - name: VM with diffrent disk types and diffrent sizes with UEFI boot type + - name: VM with different disk types and different sizes with UEFI boot type ntnx_vms: state: present name: VM with UEFI boot type @@ -441,17 +441,17 @@ state: soft_shutdown vm_uuid: "{{ vm.vm_uuid }}" - - name: Create VM with minimum requiremnts with hard_poweroff opperation + - name: Create VM with minimum requirements with hard_poweroff operation ntnx_vms: state: hard_poweroff - name: integration_test_opperations_vm + name: integration_test_operations_vm cluster: name: "{{ 
cluster.name }}" - - name: Create VM with minimum requiremnts with poweroff opperation + - name: Create VM with minimum requirements with poweroff operation ntnx_vms: state: power_off - name: integration_test_opperations_vm + name: integration_test_operations_vm cluster: name: "{{ cluster.name }}" """ @@ -928,9 +928,9 @@ def update_vm(module, result): wait_for_task_completion(module, result, False) response_state = result["response"].get("status") if response_state == "FAILED": - result[ - "warning" - ] = "VM 'soft_shutdown' operation failed, use 'hard_poweroff' instead" + result["warning"] = ( + "VM 'soft_shutdown' operation failed, use 'hard_poweroff' instead" + ) resp = vm.read(vm_uuid) result["response"] = resp diff --git a/plugins/modules/ntnx_vpcs.py b/plugins/modules/ntnx_vpcs.py index 8f7d0c658..3bbf3fea3 100644 --- a/plugins/modules/ntnx_vpcs.py +++ b/plugins/modules/ntnx_vpcs.py @@ -84,14 +84,14 @@ name: vpc_with_dns_servers dns_servers: "{{ dns_servers }}" - - name: Create VPC with all specfactions + - name: Create VPC with all specifications ntnx_vpcs: validate_certs: False state: present nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" - name: vpc_with_add_specfactions + name: vpc_with_add_specifications external_subnets: - subnet_name: "{{ external_subnet.name }}" dns_servers: "{{ dns_servers }}" diff --git a/tests/integration/targets/ntnx_acps/tasks/create_acps.yml b/tests/integration/targets/ntnx_acps/tasks/create_acps.yml index 06125d5b6..fc1490dc0 100644 --- a/tests/integration/targets/ntnx_acps/tasks/create_acps.yml +++ b/tests/integration/targets/ntnx_acps/tasks/create_acps.yml @@ -10,7 +10,6 @@ acp4_name: "{{random_name[0]}}4" acp5_name: "{{random_name[0]}}5" - - name: Create min ACP ntnx_acps: state: present @@ -38,7 +37,7 @@ wait: true name: "{{acp2_name}}" role: - uuid: '{{ acp.role.uuid }}' + uuid: "{{ acp.role.uuid }}" check_mode: false register: result ignore_errors: True @@ -108,7 +107,7 @@ - 
set_fact: todelete: "{{ todelete + [ result.acp_uuid ] }}" ########################################################## -- name: Create ACP with all specfactions +- name: Create ACP with all specifications ntnx_acps: state: present name: "{{acp4_name}}" @@ -134,7 +133,7 @@ operator: IN rhs: uuid_list: - - "{{ network.dhcp.uuid }}" + - "{{ network.dhcp.uuid }}" - scope_filter: - lhs: CATEGORY operator: IN @@ -172,8 +171,8 @@ - result.response.status.resources.filter_list.context_list.1.entity_filter_expression_list.0.right_hand_side.collection == "ALL" - result.response.status.resources.filter_list.context_list.1.scope_filter_expression_list.0.operator == "IN" - result.response.status.resources.filter_list.context_list.1.scope_filter_expression_list.0.left_hand_side == "CATEGORY" - fail_msg: " Unable to Create ACP all specfactions " - success_msg: " ACP with all specfactions created successfully " + fail_msg: " Unable to Create ACP all specifications " + success_msg: " ACP with all specifications created successfully " - set_fact: todelete: "{{ todelete + [ result.acp_uuid ] }}" diff --git a/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml b/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml index f988ef708..ed8eb9306 100644 --- a/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml +++ b/tests/integration/targets/ntnx_acps/tasks/delete_acp.yml @@ -6,7 +6,7 @@ - set_fact: acp1_name: "{{random_name[0]}}1" -- name: Create ACP with all specfactions +- name: Create ACP with all specifications ntnx_acps: state: present name: "{{acp1_name}}" @@ -18,15 +18,13 @@ - "{{ acp.user_group_uuid }}" filters: - scope_filter: - - - lhs: PROJECT + - lhs: PROJECT operator: IN rhs: uuid_list: - "{{ project.uuid }}" entity_filter: - - - lhs: ALL + - lhs: ALL operator: IN rhs: collection: ALL @@ -47,9 +45,8 @@ - result.response.status.resources.filter_list.context_list.0.scope_filter_expression_list.0.operator == "IN" - 
result.response.status.resources.filter_list.context_list.0.scope_filter_expression_list.0.left_hand_side == "PROJECT" - result.response.status.resources.filter_list.context_list.0.scope_filter_expression_list.0.right_hand_side.uuid_list.0 == "{{ project.uuid }}" - fail_msg: " Unable to Create ACP all specfactions " - success_msg: " ACP with all specfactions created successfully " - + fail_msg: " Unable to Create ACP all specifications " + success_msg: " ACP with all specifications created successfully " - name: Delete acp ntnx_acps: @@ -65,5 +62,5 @@ - result.response.status == 'SUCCEEDED' - result.failed == false - result.changed == true - fail_msg: " Unable to delete ACP with all specfactions " + fail_msg: " Unable to delete ACP with all specifications " success_msg: " ACP has been deleted successfully " diff --git a/tests/integration/targets/ntnx_address_groups/tasks/create.yml b/tests/integration/targets/ntnx_address_groups/tasks/create.yml index 59a2e0cef..b9705da90 100644 --- a/tests/integration/targets/ntnx_address_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/create.yml @@ -13,7 +13,6 @@ ag1: "{{random_name}}{{suffix_name}}1" ag2: "{{random_name}}{{suffix_name}}2" - - name: Create address group ntnx_address_groups: state: present @@ -40,7 +39,7 @@ - result.response.ip_address_block_list[1].prefix_length == 32 fail_msg: "Unable to create address group" - success_msg: "Address group created susccessfully" + success_msg: "Address group created successfully" - set_fact: todelete: '{{ result["address_group_uuid"] }}' @@ -97,7 +96,6 @@ ################################################################################################### - - name: cleanup created entities ntnx_address_groups: state: absent diff --git a/tests/integration/targets/ntnx_address_groups/tasks/delete.yml b/tests/integration/targets/ntnx_address_groups/tasks/delete.yml index 1c707f087..520ef13ef 100644 --- 
a/tests/integration/targets/ntnx_address_groups/tasks/delete.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/delete.yml @@ -12,7 +12,6 @@ - set_fact: ag1: "{{random_name}}{{suffix_name}}1" - - name: Create address group ntnx_address_groups: state: present @@ -29,7 +28,7 @@ - test_ag.response is defined - test_ag.changed == True fail_msg: "Unable to create address group" - success_msg: "address group created susccessfully" + success_msg: "address group created successfully" ################################################################################################### diff --git a/tests/integration/targets/ntnx_address_groups/tasks/update.yml b/tests/integration/targets/ntnx_address_groups/tasks/update.yml index f4ebedc50..9bd074121 100644 --- a/tests/integration/targets/ntnx_address_groups/tasks/update.yml +++ b/tests/integration/targets/ntnx_address_groups/tasks/update.yml @@ -2,7 +2,6 @@ - debug: msg: start ntnx_address_groups update tests - - name: Generate random project_name set_fact: random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" @@ -14,7 +13,6 @@ ag1: "{{random_name}}{{suffix_name}}1" ag2: "{{random_name}}{{suffix_name}}2" - ############################################################################################## - name: Create address group with @@ -34,9 +32,8 @@ that: - test_ag.response is defined - test_ag.changed == True - fail_msg: "Unable to create adress group" - success_msg: "Address group created susccessfully" - + fail_msg: "Unable to create address group" + success_msg: "Address group created successfully" ################################################################################################### @@ -64,7 +61,7 @@ - result.response.ip_address_block_list | length == 1 fail_msg: "Unable to update address group" - success_msg: "Address group updated susccessfully" + success_msg: "Address group updated successfully" 
################################################################################################### diff --git a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml index 679532a42..3d546343c 100644 --- a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml +++ b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml @@ -131,7 +131,7 @@ fail_msg: "Fail: unable to update existing category by deleting all values " success_msg: "Passed: update existing category by deleting all values finished successfully" ################# -- name: Delte the category +- name: Delete the category ntnx_categories: state: "absent" name: "{{first_category.name}}" diff --git a/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml index b27e210df..5c26f500c 100644 --- a/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation/tasks/image_nodes.yml @@ -1,68 +1,67 @@ --- - - debug: - msg: start testing ntnx_foundation +- debug: + msg: start testing ntnx_foundation - - name: Image nodes using manual and discovery modes. 
Create cluster - ntnx_foundation: - timeout: 4500 - nutanix_host: "{{foundation_host}}" - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - manual_mode: - cvm_ip: "{{IBIS_node.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" - ipmi_netmask: "{{IBIS_node.node1.ipmi_netmask}}" - ipmi_gateway: "{{IBIS_node.node1.ipmi_gateway}}" - ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" - ipmi_password: "{{IBIS_node.node1.ipmi_password}}" - hypervisor: "{{IBIS_node.node1.hypervisor}}" - hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" - node_position: "{{IBIS_node.node1.node_position}}" - - discovery_mode: #dos mode using cvm - cvm_gb_ram: 50 - node_serial: "{{IBIS_node.node3.node_serial}}" - device_hint: "vm_installer" - discovery_override: - hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" - hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" - cvm_ip: "{{IBIS_node.node3.cvm_ip}}" - hypervisor: "{{IBIS_node.node3.hypervisor}}" - - discovery_mode: # aos node using ipmi - cvm_gb_ram: 50 - ipmi_password: "{{IBIS_node.node2.ipmi_password}}" - node_serial: "{{IBIS_node.node2.node_serial}}" - discovery_override: - hypervisor_hostname: "IBIS2" - clusters: - - redundancy_factor: 2 - cluster_members: - - "{{IBIS_node.node1.cvm_ip}}" - - "{{IBIS_node.node3.cvm_ip}}" - - "{{IBIS_node.node2.cvm_ip}}" - name: "test-cluster" - register: first_cluster - ignore_errors: True - # when: false # make it true or remove to unskip task - - - name: Creation Status - assert: - that: - - first_cluster.response is defined - - first_cluster.failed==false - - first_cluster.changed==true - - first_cluster.response.cluster_urls is defined - - 
first_cluster.response.cluster_urls.0.name=="test-cluster" - fail_msg: " Fail : unable to create cluster with three node" - success_msg: "Succes: cluster with three node created successfully " - # when: false # make it true or remove to unskip task +- name: Image nodes using manual and discovery modes. Create cluster + ntnx_foundation: + timeout: 4500 + nutanix_host: "{{foundation_host}}" + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - manual_mode: + cvm_ip: "{{IBIS_node.node1.cvm_ip}}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" + ipmi_netmask: "{{IBIS_node.node1.ipmi_netmask}}" + ipmi_gateway: "{{IBIS_node.node1.ipmi_gateway}}" + ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" + ipmi_password: "{{IBIS_node.node1.ipmi_password}}" + hypervisor: "{{IBIS_node.node1.hypervisor}}" + hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" + node_position: "{{IBIS_node.node1.node_position}}" + - discovery_mode: #dos mode using cvm + cvm_gb_ram: 50 + node_serial: "{{IBIS_node.node3.node_serial}}" + device_hint: "vm_installer" + discovery_override: + hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" + hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" + cvm_ip: "{{IBIS_node.node3.cvm_ip}}" + hypervisor: "{{IBIS_node.node3.hypervisor}}" + - discovery_mode: # aos node using ipmi + cvm_gb_ram: 50 + ipmi_password: "{{IBIS_node.node2.ipmi_password}}" + node_serial: "{{IBIS_node.node2.node_serial}}" + discovery_override: + hypervisor_hostname: "IBIS2" + clusters: + - redundancy_factor: 2 + cluster_members: + - "{{IBIS_node.node1.cvm_ip}}" + - "{{IBIS_node.node3.cvm_ip}}" + - "{{IBIS_node.node2.cvm_ip}}" + name: "test-cluster" + register: first_cluster + ignore_errors: 
True + # when: false # make it true or remove to resume task +- name: Creation Status + assert: + that: + - first_cluster.response is defined + - first_cluster.failed==false + - first_cluster.changed==true + - first_cluster.response.cluster_urls is defined + - first_cluster.response.cluster_urls.0.name=="test-cluster" + fail_msg: " Fail : unable to create cluster with three node" + success_msg: "Success: cluster with three node created successfully " + # when: false # make it true or remove to resume task ###################################################### diff --git a/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml index 86472bf65..087ed1333 100644 --- a/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_foundation/tasks/negative_scenarios.yml @@ -1,119 +1,119 @@ - - name: Image nodes with check mode - check_mode: yes - ntnx_foundation: - timeout: 3660 - nutanix_host: "{{foundation_host}}" - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - manual_mode: - cvm_gb_ram: 50 - cvm_ip: "{{IBIS_node.node1.cvm_ip}}" - hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" - ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" - ipmi_password: "{{IBIS_node.node1.ipmi_password}}" - hypervisor: "{{IBIS_node.node1.hypervisor}}" - hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" - node_position: "{{IBIS_node.node1.node_position}}" - clusters: - - redundancy_factor: 2 - cluster_members: - - "{{IBIS_node.node1.cvm_ip}}" - - "{{IBIS_node.node3.cvm_ip}}" - - "{{IBIS_node.node2.cvm_ip}}" - name: "test-cluster" - register: result +- name: Image nodes with check 
mode + check_mode: yes + ntnx_foundation: + timeout: 3660 + nutanix_host: "{{foundation_host}}" + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - manual_mode: + cvm_gb_ram: 50 + cvm_ip: "{{IBIS_node.node1.cvm_ip}}" + hypervisor_hostname: "{{IBIS_node.node1.hypervisor_hostname}}" + ipmi_ip: "{{IBIS_node.node1.ipmi_ip}}" + ipmi_password: "{{IBIS_node.node1.ipmi_password}}" + hypervisor: "{{IBIS_node.node1.hypervisor}}" + hypervisor_ip: "{{IBIS_node.node1.hypervisor_ip}}" + node_position: "{{IBIS_node.node1.node_position}}" + clusters: + - redundancy_factor: 2 + cluster_members: + - "{{IBIS_node.node1.cvm_ip}}" + - "{{IBIS_node.node3.cvm_ip}}" + - "{{IBIS_node.node2.cvm_ip}}" + name: "test-cluster" + register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed==false - - result.changed==false - - result.response.blocks.0.nodes.0.cvm_ip=="{{IBIS_node.node1.cvm_ip}}" - - result.response.blocks.0.nodes.0.hypervisor_hostname=="{{IBIS_node.node1.hypervisor_hostname}}" - - result.response.blocks.0.nodes.0.ipmi_ip=="{{IBIS_node.node1.ipmi_ip}}" - - result.response.blocks.0.nodes.0.hypervisor=="{{IBIS_node.node1.hypervisor}}" - - result.response.blocks.0.nodes.0.node_position=="{{IBIS_node.node1.node_position}}" - - result.response.clusters.0.cluster_name=="test-cluster" - fail_msg: " Fail : check_mode fail" - success_msg: "Succes: returned response as expected" +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed==false + - result.changed==false + - result.response.blocks.0.nodes.0.cvm_ip=="{{IBIS_node.node1.cvm_ip}}" + - result.response.blocks.0.nodes.0.hypervisor_hostname=="{{IBIS_node.node1.hypervisor_hostname}}" 
+ - result.response.blocks.0.nodes.0.ipmi_ip=="{{IBIS_node.node1.ipmi_ip}}" + - result.response.blocks.0.nodes.0.hypervisor=="{{IBIS_node.node1.hypervisor}}" + - result.response.blocks.0.nodes.0.node_position=="{{IBIS_node.node1.node_position}}" + - result.response.clusters.0.cluster_name=="test-cluster" + fail_msg: " Fail : check_mode fail" + success_msg: "Success: returned response as expected" ################################### - - debug: - msg: start negative_scenarios for ntnx_foundation +- debug: + msg: start negative_scenarios for ntnx_foundation ################################### - - name: Image nodes with wrong serial - ntnx_foundation: - timeout: 3660 - nutanix_host: "{{foundation_host}}" - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - discovery_mode: - cvm_gb_ram: 50 - node_serial: wrong_serial - device_hint: "vm_installer" - discovery_override: - hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" - hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" - cvm_ip: "{{IBIS_node.node3.cvm_ip}}" - hypervisor: "{{IBIS_node.node3.hypervisor}}" - register: result - ignore_errors: True +- name: Image nodes with wrong serial + ntnx_foundation: + timeout: 3660 + nutanix_host: "{{foundation_host}}" + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - discovery_mode: + cvm_gb_ram: 50 + node_serial: wrong_serial + device_hint: "vm_installer" + discovery_override: + hypervisor_hostname: 
"{{IBIS_node.node3.hypervisor_hostname}}" + hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" + cvm_ip: "{{IBIS_node.node3.cvm_ip}}" + hypervisor: "{{IBIS_node.node3.hypervisor}}" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.msg == "Failed generating Image Nodes Spec" - - result.changed==false - - result.failed==true - fail_msg: " Fail : image node with wrong serial done successfully " - success_msg: "Succes: unable to image node with wrong serial " +- name: Creation Status + assert: + that: + - result.msg == "Failed generating Image Nodes Spec" + - result.changed==false + - result.failed==true + fail_msg: " Fail : image node with wrong serial done successfully " + success_msg: "Success: unable to image node with wrong serial " ################################### - - name: Image nodes with wrong hypervisor - ntnx_foundation: - timeout: 3660 - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "0" - nos_package: "{{nos_package}}" - blocks: - - block_id: "{{IBIS_node.block_id}}" - nodes: - - discovery_mode: - cvm_gb_ram: 50 - node_serial: wrong_serial - device_hint: "vm_installer" - discovery_override: - hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" - cvm_ip: "{{IBIS_node.node3.cvm_ip}}" - hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" - hypervisor: "phoenix" - register: result - ignore_errors: True +- name: Image nodes with wrong hypervisor + ntnx_foundation: + timeout: 3660 + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "0" + nos_package: "{{nos_package}}" + blocks: + - block_id: "{{IBIS_node.block_id}}" + nodes: + - discovery_mode: + 
cvm_gb_ram: 50 + node_serial: wrong_serial + device_hint: "vm_installer" + discovery_override: + hypervisor_ip: "{{IBIS_node.node3.hypervisor_ip}}" + cvm_ip: "{{IBIS_node.node3.cvm_ip}}" + hypervisor_hostname: "{{IBIS_node.node3.hypervisor_hostname}}" + hypervisor: "phoenix" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.changed==false - - result.failed==true - - "result.msg=='value of hypervisor must be one of: kvm, hyperv, xen, esx, ahv, got: phoenix found in blocks -> nodes -> discovery_mode -> discovery_override'" - fail_msg: " Fail : Image nodes with wrong hypervisor done successfully " - success_msg: "Succes: unable to image node with wrong hypervisor" +- name: Creation Status + assert: + that: + - result.changed==false + - result.failed==true + - "result.msg=='value of hypervisor must be one of: kvm, hyperv, xen, esx, ahv, got: phoenix found in blocks -> nodes -> discovery_mode -> discovery_override'" + fail_msg: " Fail : Image nodes with wrong hypervisor done successfully " + success_msg: "Success: unable to image node with wrong hypervisor" diff --git a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml index d2cca917b..f9600af78 100644 --- a/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml +++ b/tests/integration/targets/ntnx_foundation_aos_packages_info/tasks/get_aos.yml @@ -14,4 +14,4 @@ - result.failed==false - result.changed==false fail_msg: " Fail : unable to get aos_packages " - success_msg: "Succes: got aos_packages successfully " + success_msg: "Success: got aos_packages successfully " diff --git a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml index 9988683a3..19d5e37c6 100644 --- 
a/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml +++ b/tests/integration/targets/ntnx_foundation_bmc_ipmi_config/tasks/configure_ipmi.yml @@ -24,4 +24,4 @@ - result.response.blocks.0.nodes.0.ipmi_configure_successful==true - result.response.blocks.0.nodes.0.ipmi_message is defined fail_msg: "bmc ipmi configure was failed with error result.error" - success_msg: "bmc ipmi configure was successfull" + success_msg: "bmc ipmi configure was successful" diff --git a/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml index 113c4d3a3..186d0bf2f 100644 --- a/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_central/tasks/image_nodes.yml @@ -42,7 +42,7 @@ hypervisor_hostname: "{{node3.discovery_override.hypervisor_hostname}}" register: result ignore_errors: true - # when: false # make it true or remove to unskip task + # when: false # make it true or remove to resume task - name: Creation Status assert: @@ -51,5 +51,5 @@ - result.failed==false - result.changed==true fail_msg: "fail: Unable to image nodes or create cluster " - success_msg: "succes: Imaging and cluster created successfully " - # when: false # make it true or remove to unskip task + success_msg: "success: Imaging and cluster created successfully " + # when: false # make it true or remove to resume task diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml index 92943493e..95e67fd4f 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys/tasks/create_key.yml @@ -3,7 +3,7 @@ - name: create api key with check_mode ntnx_foundation_central_api_keys: - alias: test + alias: test check_mode: true 
register: result ignore_errors: true @@ -16,16 +16,15 @@ - result.changed==false - result.response.alias=="test" fail_msg: "fail: Unable to create api key with check_mode: " - success_msg: "succes: api key with check_mode: " + success_msg: "success: api key with check_mode: " - name: Generate random alias for api key set_fact: random_alias: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" - - name: create api key with random alias ntnx_foundation_central_api_keys: - alias: "{{random_alias.0}}" + alias: "{{random_alias.0}}" register: result ignore_errors: true @@ -37,10 +36,10 @@ - result.changed==true - result.response.key_uuid is defined fail_msg: "fail: Unable to create api key " - success_msg: "succes: api key created successfully " + success_msg: "success: api key created successfully " - ntnx_foundation_central_api_keys: - alias: "{{random_alias.0}}" + alias: "{{random_alias.0}}" register: result ignore_errors: true @@ -52,4 +51,4 @@ - result.status_code==400 - result.error is defined fail_msg: "fail: created duplicate api key with same alias " - success_msg: "succes: returned error as expected " + success_msg: "success: returned error as expected " diff --git a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml index c26afa9f1..55a1b4870 100644 --- a/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_api_keys_info/tasks/key_info.yml @@ -8,7 +8,7 @@ - name: create api key with random alias ntnx_foundation_central_api_keys: - alias: "{{random_alias.0}}" + alias: "{{random_alias.0}}" register: key ignore_errors: true @@ -21,7 +21,7 @@ - key.response.key_uuid is defined - key.response.api_key is defined fail_msg: "fail: Unable to create api key " - success_msg: "succes: api key created successfully " + success_msg: "success: 
api key created successfully " - name: get api key with key_uuid ntnx_foundation_central_api_keys_info: @@ -35,7 +35,7 @@ - result.response is defined - result.response.alias=="{{random_alias.0}}" fail_msg: "fail: Unable to get api key with key_uuid" - success_msg: "succes: get api key with key_uuid " + success_msg: "success: get api key with key_uuid " - name: get api key with alias ntnx_foundation_central_api_keys_info: @@ -49,13 +49,13 @@ - result.response is defined - result.response.0.key_uuid== key.response.key_uuid fail_msg: "fail: Unable to get api key with alias name" - success_msg: "succes: get api key with alias name " + success_msg: "success: get api key with alias name " - name: get api key with custom filter ntnx_foundation_central_api_keys_info: - custom_filter: - created_timestamp: "{{key.response.created_timestamp}}" - alias: "{{key.response.alias}}" + custom_filter: + created_timestamp: "{{key.response.created_timestamp}}" + alias: "{{key.response.alias}}" register: result ignore_errors: true @@ -68,4 +68,4 @@ - result.response.api_keys.0 is defined - result.response.api_keys.0.api_key == key.response.api_key fail_msg: "fail: unable to get api key with custom filter " - success_msg: "succes: get api key with custom filter successfully " + success_msg: "success: get api key with custom filter successfully " diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml index cb248215e..4388db092 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_clusters_info/tasks/get_cluster_info.yml @@ -1,7 +1,6 @@ - debug: msg: start testing ntnx_foundation_central_imaged_clusters_info module - - name: get imaged cluster using image_cluster_uuid ntnx_foundation_central_imaged_clusters_info: 
filters: @@ -16,8 +15,7 @@ - clusters.failed==false - clusters.response is defined fail_msg: "fail: unable to get all imaged,archived cluster " - success_msg: "succes: get all imaged,archived cluster sucessfuly " - + success_msg: "success: get all imaged,archived cluster successfully " - name: get imaged cluster using image_cluster_uuid ntnx_foundation_central_imaged_clusters_info: @@ -33,7 +31,7 @@ - result.response is defined - result.response.imaged_cluster_uuid == "{{clusters.response.imaged_clusters.0.imaged_cluster_uuid}}" fail_msg: "fail: unable to get imaged cluster using image_cluster_uuid " - success_msg: "succes: get imaged cluster using image_cluster_uuid sucessfuly " + success_msg: "success: get imaged cluster using image_cluster_uuid successfully " - name: get imaged cluster using custom filter ntnx_foundation_central_imaged_clusters_info: @@ -49,9 +47,7 @@ - result.failed==false - result.response.imaged_clusters is defined fail_msg: "fail: unable to get imaged cluster using custom filter " - success_msg: "succes: get imaged cluster using custom filter sucessfully" - - + success_msg: "success: get imaged cluster using custom filter successfully" # still offset and length # - debug: # var: clusters.response diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml index 7fc30a286..43a06ae13 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml @@ -1,7 +1,7 @@ - debug: msg: start testing ntnx_foundation_central_imaged_nodes_info module -- name: get all imaged nodes +- name: get all imaged nodes ntnx_foundation_central_imaged_nodes_info: register: nodes ignore_errors: true @@ -15,7 +15,7 @@ - nodes.response.imaged_nodes is defined - nodes.response.metadata.length > 0 
fail_msg: "fail: unable to get all imaged nodes " - success_msg: "succes: get all imaged nodes sucessfully " + success_msg: "success: get all imaged nodes successfully " - name: get node by uuid ntnx_foundation_central_imaged_nodes_info: @@ -31,7 +31,7 @@ - result.response is defined - result.response.node_serial == nodes.response.imaged_nodes.0.node_serial fail_msg: "fail: unable to get node by uuid" - success_msg: "succes: get node by uuid successfully " + success_msg: "success: get node by uuid successfully " - name: get imaged node using custom filter ntnx_foundation_central_imaged_nodes_info: @@ -48,6 +48,5 @@ - result.response.imaged_nodes.0.imaged_node_uuid == nodes.response.imaged_nodes.0.imaged_node_uuid - result.response.metadata.length <=1 fail_msg: "fail: unable to get imaged node using custom filter " - success_msg: "succes: get imaged node using custom filter sucessfully" - + success_msg: "success: get imaged node using custom filter successfully" # still offset and length and filter diff --git a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml index 973b93d41..943538c9b 100644 --- a/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_discover_nodes_info/tasks/discover_nodes.yml @@ -15,7 +15,7 @@ - result.blocks.0.nodes.0.configured==false - result.blocks.0.nodes.0.ipv6_address is defined fail_msg: " Fail : unable to Discover nodes " - success_msg: "Succes: Discover nodes finished successfully " + success_msg: "Success: Discover nodes finished successfully " - name: Discover all nodes ntnx_foundation_discover_nodes_info: @@ -30,8 +30,7 @@ - result.changed==false - result.blocks.0.nodes.0.ipv6_address is defined fail_msg: " Fail : unable to discover all nodes " - success_msg: "Succes: Discover all nodes finished successfully " - + success_msg: 
"Success: Discover all nodes finished successfully " # - name: Discover nodes and include network info # api fail # ntnx_foundation_discover_nodes_info: # include_network_details: true @@ -44,4 +43,4 @@ # - result.failed==false # - result.changed==false # fail_msg: " Fail : unable to discover nodes and include network info " -# success_msg: "Succes: Discover nodes and include network info finished successfully " +# success_msg: "Success: Discover nodes and include network info finished successfully " diff --git a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml index 500b09e7c..18a3c5587 100644 --- a/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml +++ b/tests/integration/targets/ntnx_foundation_hypervisor_images_info/tasks/get_hypervisors.yml @@ -21,4 +21,4 @@ - result.failed==false - result.changed==false fail_msg: " Fail : unable to get hypervisor_images_info " - success_msg: "Succes: got hypervisor_images_info successfully " + success_msg: "Success: got hypervisor_images_info successfully " diff --git a/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml index 6794b80fd..27407b7f7 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/tasks/negative_scenarios.yml @@ -3,7 +3,7 @@ state: present source: "{{ source }}" filename: "integration-test-ntnx-package.tar.gz" - installer_type: wrong installler type + installer_type: wrong installer type timeout: 3600 register: result ignore_errors: true @@ -13,6 +13,6 @@ that: - result.failed==true - result.changed==false - - "result.msg == 'value of installer_type must be one of: kvm, esx, hyperv, xen, nos, got: wrong installler 
type'" + - "result.msg == 'value of installer_type must be one of: kvm, esx, hyperv, xen, nos, got: wrong installer type'" fail_msg: " Fail : image uploaded with wrong installer type" - success_msg: "Succes: returned error as expected " + success_msg: "Success: returned error as expected " diff --git a/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml b/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml index 3cc90a7f5..00555ee16 100644 --- a/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml +++ b/tests/integration/targets/ntnx_foundation_image_upload/tasks/upload.yml @@ -24,7 +24,7 @@ - result.failed==false - result.changed==true fail_msg: " Fail : unable to upload image with nos installer_type " - success_msg: "Succes: upload image with nos installer_type successfully " + success_msg: "Success: upload image with nos installer_type successfully " - name: Delete Image with nos installer_type ntnx_foundation_image_upload: @@ -41,4 +41,4 @@ - result.failed==false - result.changed==true fail_msg: " Fail : unable to delete image with nos installer_type " - success_msg: "Succes: image with nos installer_type deleted successfully " + success_msg: "Success: image with nos installer_type deleted successfully " diff --git a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml index b9faec447..5c8327f3e 100644 --- a/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml +++ b/tests/integration/targets/ntnx_foundation_node_network_info/tasks/get_info.yml @@ -23,4 +23,4 @@ - result.nodes.0.ipmi_gateway is defined - result.nodes.0.hypervisor_hostname is defined fail_msg: " Fail : unable to get node network info " - success_msg: "Succes: Got node network info successfully " + success_msg: "Success: Got node network info successfully " diff --git 
a/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml index 46056d0da..bc85994cb 100644 --- a/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml +++ b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml @@ -1,214 +1,214 @@ --- - - debug: - msg: start testing ntnx_foundation test for bare metal imaging and cluster creation +- debug: + msg: start testing ntnx_foundation test for bare metal imaging and cluster creation +- name: get aos_packages_info from foundation + ntnx_foundation_aos_packages_info: + register: images - - name: get aos_packages_info from foundation - ntnx_foundation_aos_packages_info: - register: images +- name: Create spec for imaging and creating cluster out of bare metal nodes + check_mode: yes + ntnx_foundation: + timeout: 4500 + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" + nos_package: "{{images.aos_packages[0]}}" + blocks: + - block_id: "{{nodes.block_id}}" + nodes: + - manual_mode: + cvm_ip: "{{nodes.node1.cvm_ip}}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" + ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" + ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" + ipmi_ip: "{{nodes.node1.ipmi_ip}}" + ipmi_password: "{{nodes.node1.ipmi_password}}" + hypervisor: "{{nodes.node1.hypervisor}}" + hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" + node_position: "{{nodes.node1.node_position}}" + clusters: + - redundancy_factor: 1 + cluster_members: + - "{{nodes.node1.cvm_ip}}" + name: "test-cluster" + timezone: "Asia/Calcutta" + cvm_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + cvm_dns_servers: + - "{{nodes.dns_servers[0]}}" + - "{{nodes.dns_servers[1]}}" + 
hypervisor_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + enable_ns: true + backplane_vlan: "{{nodes.backplane_vlan}}" + backplane_subnet: "{{nodes.backplane_subnet}}" + backplane_netmask: "{{nodes.backplane_netmask}}" + register: spec + ignore_errors: True - - name: Create spec for imaging and creating cluster out of bare metal nodes - check_mode: yes - ntnx_foundation: - timeout: 4500 - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" - nos_package: "{{images.aos_packages[0]}}" - blocks: - - block_id: "{{nodes.block_id}}" - nodes: - - manual_mode: - cvm_ip: "{{nodes.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" - ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" - ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" - ipmi_ip: "{{nodes.node1.ipmi_ip}}" - ipmi_password: "{{nodes.node1.ipmi_password}}" - hypervisor: "{{nodes.node1.hypervisor}}" - hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" - node_position: "{{nodes.node1.node_position}}" - clusters: - - redundancy_factor: 1 - cluster_members: - - "{{nodes.node1.cvm_ip}}" - name: "test-cluster" - timezone: "Asia/Calcutta" - cvm_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" - cvm_dns_servers: - - "{{nodes.dns_servers[0]}}" - - "{{nodes.dns_servers[1]}}" - hypervisor_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" - enable_ns: true - backplane_vlan: "{{nodes.backplane_vlan}}" - backplane_subnet: "{{nodes.backplane_subnet}}" - backplane_netmask: "{{nodes.backplane_netmask}}" - register: spec - ignore_errors: True +- set_fact: + expected_spec: + { + "blocks": + [ + { + "block_id": "{{nodes.block_id}}", + "nodes": + [ + { + "cvm_gb_ram": 50, + "cvm_ip": "{{nodes.node1.cvm_ip}}", + 
"hypervisor": "{{nodes.node1.hypervisor}}", + "hypervisor_hostname": "{{nodes.node1.hypervisor_hostname}}", + "hypervisor_ip": "{{nodes.node1.hypervisor_ip}}", + "image_now": true, + "ipmi_gateway": "{{nodes.node1.ipmi_gateway}}", + "ipmi_ip": "{{nodes.node1.ipmi_ip}}", + "ipmi_netmask": "{{nodes.node1.ipmi_netmask}}", + "ipmi_password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "node_position": "{{nodes.node1.node_position}}", + }, + ], + }, + ], + "clusters": + [ + { + "backplane_netmask": "{{nodes.backplane_netmask}}", + "backplane_subnet": "{{nodes.backplane_subnet}}", + "backplane_vlan": "{{nodes.backplane_vlan}}", + "cluster_external_ip": null, + "cluster_init_now": true, + "cluster_members": ["{{nodes.node1.cvm_ip}}"], + "cluster_name": "test-cluster", + "cvm_dns_servers": "{{nodes.dns_servers[0]}},{{nodes.dns_servers[1]}}", + "cvm_ntp_servers": "{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", + "enable_ns": true, + "hypervisor_ntp_servers": "{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", + "redundancy_factor": 1, + "single_node_cluster": true, + "timezone": "Asia/Calcutta", + }, + ], + "current_cvm_vlan_tag": "{{nodes.current_cvm_vlan_tag}}", + "cvm_gateway": "{{cvm_gateway}}", + "cvm_netmask": "{{cvm_netmask}}", + "hypervisor_gateway": "{{hypervisor_gateway}}", + "hypervisor_iso": {}, + "hypervisor_netmask": "{{hypervisor_netmask}}", + "ipmi_user": "{{default_ipmi_user}}", + "nos_package": "{{images.aos_packages[0]}}", + } - - set_fact: - expected_spec: { - "blocks": [ - { - "block_id": "{{nodes.block_id}}", - "nodes": [ - { - "cvm_gb_ram": 50, - "cvm_ip": "{{nodes.node1.cvm_ip}}", - "hypervisor": "{{nodes.node1.hypervisor}}", - "hypervisor_hostname": "{{nodes.node1.hypervisor_hostname}}", - "hypervisor_ip": "{{nodes.node1.hypervisor_ip}}", - "image_now": true, - "ipmi_gateway": "{{nodes.node1.ipmi_gateway}}", - "ipmi_ip": "{{nodes.node1.ipmi_ip}}", - "ipmi_netmask": "{{nodes.node1.ipmi_netmask}}", - "ipmi_password": 
"VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", - "node_position": "{{nodes.node1.node_position}}" - } - ] - } - ], - "clusters": [ - { - "backplane_netmask": "{{nodes.backplane_netmask}}", - "backplane_subnet": "{{nodes.backplane_subnet}}", - "backplane_vlan": "{{nodes.backplane_vlan}}", - "cluster_external_ip": null, - "cluster_init_now": true, - "cluster_members": [ - "{{nodes.node1.cvm_ip}}" - ], - "cluster_name": "test-cluster", - "cvm_dns_servers": "{{nodes.dns_servers[0]}},{{nodes.dns_servers[1]}}", - "cvm_ntp_servers": "{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", - "enable_ns": true, - "hypervisor_ntp_servers": "{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", - "redundancy_factor": 1, - "single_node_cluster": true, - "timezone": "Asia/Calcutta" - } - ], - "current_cvm_vlan_tag": "{{nodes.current_cvm_vlan_tag}}", - "cvm_gateway": "{{cvm_gateway}}", - "cvm_netmask": "{{cvm_netmask}}", - "hypervisor_gateway": "{{hypervisor_gateway}}", - "hypervisor_iso": {}, - "hypervisor_netmask": "{{hypervisor_netmask}}", - "ipmi_user": "{{default_ipmi_user}}", - "nos_package": "{{images.aos_packages[0]}}" - } +- name: Verify spec + assert: + that: + - spec.response is defined + - spec.failed==false + - spec.changed==false + - spec.response == expected_spec + fail_msg: " Fail : unable to create spec for imaging nodes" + success_msg: "Success: spec generated successfully" - - name: Verify spec - assert: - that: - - spec.response is defined - - spec.failed==false - - spec.changed==false - - spec.response == expected_spec - fail_msg: " Fail : unable to create spec for imaging nodes" - success_msg: "Succes: spec generated successfully" +- name: Image nodes without cluster creation + ntnx_foundation: + timeout: 4500 + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" + 
nos_package: "{{images.aos_packages[0]}}" + blocks: + - block_id: "{{nodes.block_id}}" + nodes: + - manual_mode: + cvm_ip: "{{nodes.node1.cvm_ip}}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" + ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" + ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" + ipmi_ip: "{{nodes.node1.ipmi_ip}}" + ipmi_password: "{{nodes.node1.ipmi_password}}" + hypervisor: "{{nodes.node1.hypervisor}}" + hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" + node_position: "{{nodes.node1.node_position}}" + bond_lacp_rate: "{{nodes.node1.bond_lacp_rate}}" + bond_mode: "{{nodes.node1.bond_mode}}" - - name: Image nodes without cluster creation - ntnx_foundation: - timeout: 4500 - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" - nos_package: "{{images.aos_packages[0]}}" - blocks: - - block_id: "{{nodes.block_id}}" - nodes: - - manual_mode: - cvm_ip: "{{nodes.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" - ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" - ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" - ipmi_ip: "{{nodes.node1.ipmi_ip}}" - ipmi_password: "{{nodes.node1.ipmi_password}}" - hypervisor: "{{nodes.node1.hypervisor}}" - hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" - node_position: "{{nodes.node1.node_position}}" - bond_lacp_rate: "{{nodes.node1.bond_lacp_rate}}" - bond_mode: "{{nodes.node1.bond_mode}}" + register: result + no_log: true + ignore_errors: True - register: result - no_log: true - ignore_errors: True +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed==false + - result.changed==true + fail_msg: " Fail : unable to image nodes" + success_msg: "Success: node imaging done successfully" - - name: Creation Status - assert: - 
that: - - result.response is defined - - result.failed==false - - result.changed==true - fail_msg: " Fail : unable to image nodes" - success_msg: "Succes: node imaging done successfully" - - - name: Image nodes and create cluster out of it - ntnx_foundation: - timeout: 4500 - cvm_gateway: "{{cvm_gateway}}" - cvm_netmask: "{{cvm_netmask}}" - hypervisor_gateway: "{{hypervisor_gateway}}" - hypervisor_netmask: "{{hypervisor_netmask}}" - default_ipmi_user: "{{default_ipmi_user}}" - current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" - nos_package: "{{images.aos_packages[0]}}" - blocks: - - block_id: "{{nodes.block_id}}" - nodes: - - manual_mode: - cvm_ip: "{{nodes.node1.cvm_ip}}" - cvm_gb_ram: 50 - hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" - ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" - ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" - ipmi_ip: "{{nodes.node1.ipmi_ip}}" - ipmi_password: "{{nodes.node1.ipmi_password}}" - hypervisor: "{{nodes.node1.hypervisor}}" - hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" - node_position: "{{nodes.node1.node_position}}" - bond_lacp_rate: "{{nodes.node1.bond_lacp_rate}}" - bond_mode: "{{nodes.node1.bond_mode}}" - clusters: - - redundancy_factor: 1 - cluster_members: - - "{{nodes.node1.cvm_ip}}" - name: "test-cluster" - timezone: "Asia/Calcutta" - cvm_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" - cvm_dns_servers: - - "{{nodes.dns_servers[0]}}" - - "{{nodes.dns_servers[1]}}" - hypervisor_ntp_servers: - - "{{nodes.ntp_servers[0]}}" - - "{{nodes.ntp_servers[1]}}" - register: result - no_log: true - ignore_errors: True - - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed==false - - result.changed==true - - result.response.cluster_urls is defined - fail_msg: " Fail : unable to image nodes and create cluster" - success_msg: "Succes: cluster and node imaging done successfully" +- name: Image nodes and create cluster out of it + ntnx_foundation: + 
timeout: 4500 + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" + nos_package: "{{images.aos_packages[0]}}" + blocks: + - block_id: "{{nodes.block_id}}" + nodes: + - manual_mode: + cvm_ip: "{{nodes.node1.cvm_ip}}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" + ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" + ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" + ipmi_ip: "{{nodes.node1.ipmi_ip}}" + ipmi_password: "{{nodes.node1.ipmi_password}}" + hypervisor: "{{nodes.node1.hypervisor}}" + hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" + node_position: "{{nodes.node1.node_position}}" + bond_lacp_rate: "{{nodes.node1.bond_lacp_rate}}" + bond_mode: "{{nodes.node1.bond_mode}}" + clusters: + - redundancy_factor: 1 + cluster_members: + - "{{nodes.node1.cvm_ip}}" + name: "test-cluster" + timezone: "Asia/Calcutta" + cvm_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + cvm_dns_servers: + - "{{nodes.dns_servers[0]}}" + - "{{nodes.dns_servers[1]}}" + hypervisor_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + register: result + no_log: true + ignore_errors: True +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed==false + - result.changed==true + - result.response.cluster_urls is defined + fail_msg: " Fail : unable to image nodes and create cluster" + success_msg: "Success: cluster and node imaging done successfully" ###################################################### diff --git a/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml b/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml index 3f0087324..b33fd6a04 100644 --- a/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml +++ 
b/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml @@ -33,7 +33,7 @@ ############################################################################################# -- name: test idempotency by definig same spec as before +- name: test idempotency by defining same spec as before ntnx_image_placement_policy: state: present policy_uuid: "{{ setup_policy.response.metadata.uuid }}" diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml index bfbc770df..34243e48c 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml @@ -332,7 +332,7 @@ - result.changed == false - result.failed == false - result.msg == "Nothing to change." - fail_msg: "Fail: idempotecy check fail " + fail_msg: "Fail: idempotency check fail " success_msg: "Passed: Returned as expected " ################################# - name: try to update node pool config with wrong labels diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml index 3cc7c5541..58a471a17 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml @@ -16,7 +16,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -46,8 +46,8 @@ - result.failed == true - result.error == "cpu cannot be less then 4" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with cpu less than minimum" - success_msg: " Pass: 
Retunred as expected" + fail_msg: " Fail: cluster created with cpu less than minimum" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with memory_gb less than minimum ntnx_karbon_clusters: @@ -59,7 +59,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -89,8 +89,8 @@ - result.failed == true - result.error == "memory_gb cannot be less then 8" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with memory_gb size less than minimum" - success_msg: " Pass: Retunred as expected" + fail_msg: " Fail: cluster created with memory_gb size less than minimum" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with wrong num_instances for master nodes ntnx_karbon_clusters: @@ -102,7 +102,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -132,8 +132,8 @@ - result.failed == true - result.error == "value of masters.num_instances must be 1 or 2" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with wrong num_instances for master nodes" - success_msg: " Pass: Retunred as expected" + fail_msg: " Fail: cluster created with wrong num_instances for master nodes" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with wrong num_instances for etcd nodes ntnx_karbon_clusters: @@ -145,7 +145,7 @@ node_subnet: uuid: "{{network.dhcp.uuid}}" cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" + node_cidr_mask_size: 
"{{cni.node_cidr_mask_size}}" service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" network_provider: Calico @@ -175,6 +175,6 @@ - result.failed == true - result.error == "value of etcd.num_instances must be 1, 3 or 5" - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with wrong num_instances for etcd nodes" - success_msg: " Pass: Retunred as expected" + fail_msg: " Fail: cluster created with wrong num_instances for etcd nodes" + success_msg: " Pass: Returned as expected" ############################# diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml index cf88b97bf..fa0fa595e 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml @@ -1,5 +1,4 @@ --- - - debug: msg: "start ntnx_karbon_registries tests" @@ -10,7 +9,6 @@ - set_fact: registry_name: "{{random_name[0]}}" - - name: create registry with check_mode ntnx_karbon_registries: name: "{{registry_name}}" @@ -27,8 +25,8 @@ - result.changed == false - result.response.name == "{{registry_name}}" - result.response.url == "{{url}}" - success_msg: ' Success: returned response as expected ' - fail_msg: ' Fail: create registry with check_mode ' + success_msg: " Success: returned response as expected " + fail_msg: " Fail: create registry with check_mode " ################################################################ - name: create registry ntnx_karbon_registries: @@ -46,7 +44,7 @@ - result.changed == true - result.response.name == "{{registry_name}}" - result.response.uuid is defined - fail_msg: "Fail: Unable to create registery" + fail_msg: "Fail: Unable to create registry" success_msg: "Pass: create registry finished successfully" ################################################################ - name: delete registry diff --git 
a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml index cbe281e4d..cb1a4ae1a 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml @@ -4,7 +4,7 @@ - name: create registry with wrong port number ntnx_karbon_registries: - name: test_regitry + name: test_registry url: "{{url}}" port: 501 register: result @@ -16,5 +16,5 @@ - result.response is defined - result.failed == true - result.changed == false - fail_msg: "Fail: create registery with wrong port number finished successfully" + fail_msg: "Fail: create registry with wrong port number finished successfully" success_msg: "Pass: Returned as expected " diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml index 57efb4ece..6cdf767e8 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml @@ -3,7 +3,7 @@ # This playbook will test below cases: # 1. Create HA instance spec with check mode and minimal spec # 2. Create HA postgres database instance with multicluster nodes -# 3. Create HA postgres database instance with static IP and cluster IP assigments +# 3. 
Create HA postgres database instance with static IP and cluster IP assignments - debug: msg: "start ndb databases test flow for testing high availability databases" diff --git a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml index 232112e75..b3313d363 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml +++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml @@ -2,38 +2,36 @@ - debug: msg: Start testing ntnx_ndb_clusters - - name: Register cluster with prism_vlan in check mode ntnx_ndb_clusters: - name: "{{cluster.cluster3.name}}" - desc: "{{cluster.cluster3.desc}}" - name_prefix: "{{cluster.cluster3.name_prefix}}" - cluster_ip: "{{cluster.cluster3.cluster_ip}}" - cluster_credentials: - username: "{{cluster.cluster3.cluster_credentials.username}}" - password: "{{cluster.cluster3.cluster_credentials.password}}" - agent_network: - dns_servers: - - "{{cluster.cluster3.agent_network.dns_servers[0]}}" - - "{{cluster.cluster3.agent_network.dns_servers[1]}}" - ntp_servers: - - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" - vlan_access: - prism_vlan: - vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" - vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" - gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - storage_container: "{{cluster.cluster3.storage_container}}" + name: "{{cluster.cluster3.name}}" + desc: "{{cluster.cluster3.desc}}" + name_prefix: "{{cluster.cluster3.name_prefix}}" + cluster_ip: "{{cluster.cluster3.cluster_ip}}" + cluster_credentials: + username: "{{cluster.cluster3.cluster_credentials.username}}" 
+ password: "{{cluster.cluster3.cluster_credentials.password}}" + agent_network: + dns_servers: + - "{{cluster.cluster3.agent_network.dns_servers[0]}}" + - "{{cluster.cluster3.agent_network.dns_servers[1]}}" + ntp_servers: + - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" + vlan_access: + prism_vlan: + vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" + vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" + static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" + gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" + subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" + storage_container: "{{cluster.cluster3.storage_container}}" register: result ignore_errors: true check_mode: true - - name: check listing status assert: that: @@ -50,36 +48,36 @@ - result.response.networksInfo[0].networkInfo[2].value == "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - result.response.networksInfo[0].networkInfo[3].value == "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - result.response.networksInfo[0].type== "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - fail_msg: "fail: Wring with check mode for registring cluster" - success_msg: "pass: retunred as expected" + fail_msg: "fail: Wring with check mode for registering cluster" + success_msg: "pass: returned as expected" - name: Register cluster with prism_vlan ntnx_ndb_clusters: - wait: true - name: "{{cluster.cluster3.name}}" - desc: "{{cluster.cluster3.desc}}" - name_prefix: "{{cluster.cluster3.name_prefix}}" - cluster_ip: "{{cluster.cluster3.cluster_ip}}" - cluster_credentials: - username: "{{cluster.cluster3.cluster_credentials.username}}" - password: "{{cluster.cluster3.cluster_credentials.password}}" - agent_network: - dns_servers: - - 
"{{cluster.cluster3.agent_network.dns_servers[0]}}" - - "{{cluster.cluster3.agent_network.dns_servers[1]}}" - ntp_servers: - - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" - vlan_access: - prism_vlan: - vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" - vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" - gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - storage_container: "{{cluster.cluster3.storage_container}}" + wait: true + name: "{{cluster.cluster3.name}}" + desc: "{{cluster.cluster3.desc}}" + name_prefix: "{{cluster.cluster3.name_prefix}}" + cluster_ip: "{{cluster.cluster3.cluster_ip}}" + cluster_credentials: + username: "{{cluster.cluster3.cluster_credentials.username}}" + password: "{{cluster.cluster3.cluster_credentials.password}}" + agent_network: + dns_servers: + - "{{cluster.cluster3.agent_network.dns_servers[0]}}" + - "{{cluster.cluster3.agent_network.dns_servers[1]}}" + ntp_servers: + - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" + vlan_access: + prism_vlan: + vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" + vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" + static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" + gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" + subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" + storage_container: "{{cluster.cluster3.storage_container}}" register: result ignore_errors: true no_log: true @@ -93,16 
+91,16 @@ - result.response.name == "{{cluster.cluster3.name}}" - result.response.description == "{{cluster.cluster3.desc}}" - result.response.ipAddresses[0] == "{{cluster.cluster3.cluster_ip}}" - fail_msg: "fail: Unable to Register cluster with prisim_vlan" - success_msg: "pass: Register cluster with prisim_vlan finished successfully" + fail_msg: "fail: Unable to Register cluster with prism_vlan" + success_msg: "pass: Register cluster with prism_vlan finished successfully" ################################################################ - name: update cluster name , desc ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - name: newname - desc: newdesc + uuid: "{{result.cluster_uuid}}" + name: newname + desc: newdesc register: result ignore_errors: true no_log: true @@ -116,14 +114,14 @@ fail_msg: "fail: Unable to update cluster name , desc" success_msg: "pass: update cluster name , desc finished successfully" - set_fact: - todelete: "{{result.cluster_uuid}}" + todelete: "{{result.cluster_uuid}}" ################################################################ -- name: update cluster credeential in check_mode +- name: update cluster credential in check_mode ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - cluster_credentials: - username: test - password: test + uuid: "{{result.cluster_uuid}}" + cluster_credentials: + username: test + password: test register: result ignore_errors: true no_log: true @@ -138,14 +136,14 @@ - result.response.username is defined - result.response.password is defined - result.cluster_uuid is defined - fail_msg: "fail: update cluster credeential while check_mode" - success_msg: "pass: retunred as expected" + fail_msg: "fail: update cluster credential while check_mode" + success_msg: "pass: returned as expected" ################################################################ -- name: Negative Secnarios update storage container +- name: Negative Scenarios update storage container ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" 
- storage_container: "{{cluster.cluster3.storage_container}}" + uuid: "{{result.cluster_uuid}}" + storage_container: "{{cluster.cluster3.storage_container}}" register: out ignore_errors: true no_log: true @@ -156,21 +154,21 @@ - out.changed == false - out.failed == true - out.msg == "parameters are mutually exclusive: uuid|storage_container" - fail_msg: "Fail: storage_continer updated " + fail_msg: "Fail: storage_container updated " success_msg: " Success: returned error as expected " ################################################################ -- name: Negative Secnarios update vlan access +- name: Negative Scenarios update vlan access ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - vlan_access: - prism_vlan: - vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" - vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" - static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" - gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" - subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" + uuid: "{{result.cluster_uuid}}" + vlan_access: + prism_vlan: + vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" + vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" + static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" + gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" + subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" register: out ignore_errors: true no_log: true @@ -186,18 +184,18 @@ ################################################################ -- name: Negative Secnarios update agent network +- name: Negative Scenarios update agent network ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - agent_network: - dns_servers: - - "{{cluster.cluster3.agent_network.dns_servers[0]}}" - - "{{cluster.cluster3.agent_network.dns_servers[1]}}" - ntp_servers: - - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" - - 
"{{cluster.cluster3.agent_network.ntp_servers[1]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" - - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" + uuid: "{{result.cluster_uuid}}" + agent_network: + dns_servers: + - "{{cluster.cluster3.agent_network.dns_servers[0]}}" + - "{{cluster.cluster3.agent_network.dns_servers[1]}}" + ntp_servers: + - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" register: out ignore_errors: true no_log: true @@ -213,10 +211,10 @@ ################################################################ -- name: Negative Secnarios update agent network +- name: Negative Scenarios update agent network ntnx_ndb_clusters: - uuid: "{{result.cluster_uuid}}" - name_prefix: "{{cluster.cluster3.name_prefix}}" + uuid: "{{result.cluster_uuid}}" + name_prefix: "{{cluster.cluster3.name_prefix}}" register: out ignore_errors: true no_log: true @@ -322,8 +320,8 @@ - name: delete cluster ntnx_ndb_clusters: - uuid: "{{todelete}}" - state: absent + uuid: "{{todelete}}" + state: absent register: result ignore_errors: true no_log: true @@ -338,6 +336,5 @@ fail_msg: "Unable to delete custer" success_msg: "cluster deleted successfully" - - set_fact: - todelete: [] + todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml index 882a78bb5..9d5b85193 100644 --- a/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml +++ b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml @@ -16,14 +16,13 @@ - set_fact: db1_name: "{{random_name[0]}}" - clone_db1: "{{random_name[0]}}-clone" + clone_db1: "{{random_name[0]}}-clone" vm1_name: "{{random_name[0]}}-vm" tm1: "{{random_name[0]}}-time-machine" snapshot_name: "{{random_name[0]}}-snapshot" 
############################################ setup db and its snapshot for clone tests ########################################### - - name: create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true @@ -92,7 +91,7 @@ - name: create manual snapshot of database ntnx_ndb_database_snapshots: - time_machine_uuid: "{{time_machine_uuid}}" + time_machine_uuid: "{{time_machine_uuid}}" name: "{{snapshot_name}}" register: result @@ -112,7 +111,6 @@ ############################################ create clone on new db server vm tests ########################################### - - name: create spec for clone of database created above on new db server vm check_mode: yes ntnx_ndb_database_clones: @@ -160,76 +158,73 @@ ansible-clones: ansible-test-db-clones register: result - - - set_fact: - expected_response: { - "actionArguments": [ - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "pre_clone_cmd", - "value": "ls" - }, - { - "name": "post_clone_cmd", - "value": "ls -a" - }, - { - "name": "dbserver_description", - "value": "vm for db server" - } - ], - "clustered": false, - "computeProfileId": "{{compute_profile.uuid}}", - "createDbserver": true, - "databaseParameterProfileId": "{{db_params_profile.uuid}}", - "description": "ansible-created-clone", - "latestSnapshot": false, - "lcmConfig": { - "databaseLCMConfig": { - "expiryDetails": { - "deleteDatabase": true, - "expireInDays": 2, - "expiryDateTimezone": "Asia/Calcutta", - "remindBeforeInDays": 1 - }, - "refreshDetails": { - "refreshDateTimezone": "Asia/Calcutta", - "refreshInDays": 2, - "refreshTime": "12:00:00" - } - } + expected_response: + { + "actionArguments": + [ + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + { "name": "pre_clone_cmd", "value": "ls" }, + { "name": "post_clone_cmd", "value": "ls -a" }, + { "name": "dbserver_description", "value": "vm for db server" }, + ], + "clustered": false, + 
"computeProfileId": "{{compute_profile.uuid}}", + "createDbserver": true, + "databaseParameterProfileId": "{{db_params_profile.uuid}}", + "description": "ansible-created-clone", + "latestSnapshot": false, + "lcmConfig": + { + "databaseLCMConfig": + { + "expiryDetails": + { + "deleteDatabase": true, + "expireInDays": 2, + "expiryDateTimezone": "Asia/Calcutta", + "remindBeforeInDays": 1, + }, + "refreshDetails": + { + "refreshDateTimezone": "Asia/Calcutta", + "refreshInDays": 2, + "refreshTime": "12:00:00", + }, + }, + }, + "name": "{{clone_db1}}", + "networkProfileId": "{{network_profile.uuid}}", + "nodeCount": 1, + "nodes": + [ + { + "computeProfileId": "{{compute_profile.uuid}}", + "networkProfileId": "{{network_profile.uuid}}", + "nxClusterId": "{{cluster.cluster1.uuid}}", + "properties": [], + "vmName": "{{vm1_name}}", + }, + ], + "nxClusterId": "{{cluster.cluster1.uuid}}", + "snapshotId": null, + "sshPublicKey": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "tags": + [ + { + "tagId": "{{tags.clones.uuid}}", + "tagName": "ansible-clones", + "value": "ansible-test-db-clones", }, - "name": "{{clone_db1}}", - "networkProfileId": "{{network_profile.uuid}}", - "nodeCount": 1, - "nodes": [ - { - "computeProfileId": "{{compute_profile.uuid}}", - "networkProfileId": "{{network_profile.uuid}}", - "nxClusterId": "{{cluster.cluster1.uuid}}", - "properties": [], - "vmName": "{{vm1_name}}" - } - ], - "nxClusterId": "{{cluster.cluster1.uuid}}", - "snapshotId": null, - "sshPublicKey": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", - "tags": [ - { - "tagId": "{{tags.clones.uuid}}", - "tagName": "ansible-clones", - "value": "ansible-test-db-clones" - } - ], - "timeMachineId": "{{time_machine_uuid}}", - "timeZone": "UTC", - "userPitrTimestamp": "2023-02-04 07:29:36", - "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - } + ], + "timeMachineId": "{{time_machine_uuid}}", + "timeZone": "UTC", + "userPitrTimestamp": "2023-02-04 07:29:36", + "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", 
+ } - name: Check mode status assert: @@ -328,7 +323,6 @@ ############################################ clone update and removal/refresh schedules related tests ########################################### - - name: update name, desc, tags and schedules ntnx_ndb_database_clones: uuid: "{{clone_uuid}}" @@ -370,7 +364,7 @@ - result.response.tags[0].value == "ansible-test-db-clones-updated" fail_msg: "Unable to update clone" - success_msg: "Database clone updated succefully" + success_msg: "Database clone updated successfully" - name: check idempotency ntnx_ndb_database_clones: @@ -393,8 +387,6 @@ ansible-clones: ansible-test-db-clones-updated register: result - - - name: check idempotency status assert: that: @@ -404,7 +396,6 @@ fail_msg: "database clone got updated" success_msg: "database clone update got skipped due to no state changes" - - name: remove schedules ntnx_ndb_database_clones: uuid: "{{clone_uuid}}" @@ -417,8 +408,6 @@ register: result - - - name: Check schedule remove status assert: that: @@ -429,11 +418,10 @@ - result.response.lcmConfig.expiryDetails == None - result.response.lcmConfig.refreshDetails == None fail_msg: "schedules update failed" - success_msg: "schedules removed succefully" + success_msg: "schedules removed successfully" ########################################### refresh clone ########################################### - - name: create spec for refresh clone to a pitr timestamp check_mode: yes ntnx_ndb_database_clone_refresh: @@ -442,7 +430,6 @@ timezone: "UTC" register: result - - name: Check refresh db with pitr spec assert: that: @@ -453,15 +440,12 @@ fail_msg: "creation refresh db clone spec failed" success_msg: "refresh db clone spec created successfully" - - name: refresh db clone ntnx_ndb_database_clone_refresh: uuid: "{{clone_uuid}}" snapshot_uuid: "{{snapshot_uuid}}" register: result - - - name: Check database refresh status assert: that: @@ -470,11 +454,10 @@ - result.uuid is defined - result.response.status == "READY" 
fail_msg: "database refresh failed" - success_msg: "database refresh completed succefully" + success_msg: "database refresh completed successfully" ########################################### delete clone tests########################################### - - name: create soft remove spec check_mode: yes ntnx_ndb_database_clones: @@ -483,8 +466,6 @@ soft_remove: true register: result - - - name: verify soft remove spec assert: that: @@ -496,8 +477,6 @@ fail_msg: "creation of spec for soft remove failed" success_msg: "spec for soft remove created successfully" - - - name: create unregistration spec check_mode: yes ntnx_ndb_database_clones: @@ -505,8 +484,6 @@ uuid: "{{clone_uuid}}" register: result - - - name: verify unregistration spec assert: that: @@ -525,8 +502,6 @@ delete_from_vm: true register: result - - - name: verify status of db clone delete assert: that: @@ -538,7 +513,6 @@ ########################################### authorize and deauthorize db server vms########################################### - - name: authorize db server vms ntnx_ndb_authorize_db_server_vms: time_machine: @@ -547,8 +521,6 @@ - name: "{{vm1_name}}" register: result - - - name: verify status of authorization of db server vms assert: that: @@ -567,8 +539,6 @@ - name: "{{vm1_name}}" register: result - - - name: verify status of deauthorization of db server vms assert: that: @@ -578,7 +548,6 @@ fail_msg: "database deauthorization with time machine failed" success_msg: "database deauthorization with time machine went successfully" - - name: authorize db server vms for hosting clone ntnx_ndb_authorize_db_server_vms: time_machine: @@ -587,7 +556,6 @@ - name: "{{vm1_name}}" register: result - - name: verify status of authorization of db server vms assert: that: @@ -599,7 +567,6 @@ ############################################ clone on authorized db server vm ########################################### - - set_fact: timestamp: "2123-11-08 12:36:15" - name: create clone using snapshot on 
authorized server @@ -636,8 +603,6 @@ ansible-clones: ansible-test-db-clones register: result - - - name: Clone create status on authorized db server vm assert: that: @@ -652,7 +617,7 @@ - result.response.databaseNodes[0].dbserverId == db_server_uuid - result.response.parentTimeMachineId == time_machine_uuid fail_msg: "Unable to create clone" - success_msg: "Database clone created succefully" + success_msg: "Database clone created successfully" - set_fact: delete_clone_uuid: "{{result.uuid}}" @@ -683,8 +648,6 @@ - name: "{{vm1_name}}" register: result - - - name: verify status of authorization of db server vms assert: that: @@ -728,8 +691,6 @@ ansible-clones: ansible-test-db-clones register: result - - - name: Clone create status on authorized db server vm assert: that: @@ -746,8 +707,6 @@ fail_msg: "Unable to create clone from latest snapshot" success_msg: "Database clone created from latest snapshot successfully" - - - set_fact: delete_clone_uuid: "{{result.uuid}}" @@ -800,7 +759,6 @@ success_msg: "get era clones using it's id successfully" ################################################################ - - name: get era clones with incorrect name ntnx_ndb_clones_info: name: "abcd" @@ -825,7 +783,6 @@ delete_from_vm: true register: result - - name: verify status of db clone delete assert: that: @@ -835,7 +792,6 @@ fail_msg: "database delete failed" success_msg: "database delete successfully" - - name: delete db server vm ntnx_ndb_db_server_vms: state: "absent" @@ -852,7 +808,6 @@ fail_msg: "db server vm deleted failed" success_msg: "db server vm deleted successfully" - - name: delete database created earlier ntnx_ndb_databases: state: "absent" diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml index 7387bff35..eaeea2156 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml +++ 
b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml @@ -24,7 +24,6 @@ ############################################ setup db ########################################### - - name: create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true @@ -91,7 +90,6 @@ - set_fact: db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" - ############################################ snapshots test ########################################### - name: create snapshot create spec @@ -107,27 +105,23 @@ register: result - set_fact: - expected_response: { + expected_response: + { "changed": false, "error": null, "failed": false, - "response": { - "lcmConfig": { - "snapshotLCMConfig": { - "expiryDetails": { - "expireInDays": 4, - } - } - }, + "response": + { + "lcmConfig": + { + "snapshotLCMConfig": { "expiryDetails": { "expireInDays": 4 } }, + }, "name": "{{snapshot_name}}", - "replicateToClusterIds": [ - "{{cluster.cluster1.uuid}}", - "test_uuid2", - "test_uuid3" - ] - }, - "snapshot_uuid": null - } + "replicateToClusterIds": + ["{{cluster.cluster1.uuid}}", "test_uuid2", "test_uuid3"], + }, + "snapshot_uuid": null, + } - name: Check mode status assert: @@ -139,14 +133,12 @@ fail_msg: "Unable to create snapshot create spec" success_msg: "Snapshot create spec generated successfully using check mode" - - name: create snapshot with minimal spec ntnx_ndb_database_snapshots: name: "{{snapshot_name}}1" time_machine_uuid: "{{time_machine_uuid}}" register: result - - name: snapshot create status assert: that: @@ -165,7 +157,6 @@ expiry_days: 4 register: result - - set_fact: snapshot_uuid: "{{result.snapshot_uuid}}" @@ -181,8 +172,6 @@ fail_msg: "Unable to create snapshot with expiry config" success_msg: "Snapshot with expiry config created successfully" - - - name: rename snapshot ntnx_ndb_database_snapshots: snapshot_uuid: "{{snapshot_uuid}}" @@ -200,8 +189,6 @@ fail_msg: "Unable to rename snapshot" success_msg: "Snapshot 
renamed successfully" - - - name: update expiry ntnx_ndb_database_snapshots: snapshot_uuid: "{{snapshot_uuid}}" @@ -219,8 +206,6 @@ fail_msg: "Unable to update snapshot expiry" success_msg: "snapshot expiry updated successfully" - - - name: remove expiry schedule ntnx_ndb_database_snapshots: snapshot_uuid: "{{snapshot_uuid}}" @@ -238,7 +223,6 @@ fail_msg: "Unable to remove snapshot expiry schedule" success_msg: "snapshot expiry schedule removed successfully" - - name: Add expiry schedule and rename ntnx_ndb_database_snapshots: snapshot_uuid: "{{snapshot_uuid}}" @@ -259,7 +243,6 @@ fail_msg: "Unable to add expiry schedule and rename it" success_msg: "Snapshot updated successfully" - - name: Idempotency check ntnx_ndb_database_snapshots: snapshot_uuid: "{{snapshot_uuid}}" @@ -275,7 +258,6 @@ fail_msg: "snapshot got updated" success_msg: "snapshot update got skipped due to no state changes" - ############################################ log catchup ###################################### - name: create spec for log catchup @@ -285,35 +267,29 @@ register: result - set_fact: - expected_response: { + expected_response: + { "changed": false, "error": null, "failed": false, - "response": { - "actionArguments": [ - { - "name": "preRestoreLogCatchup", - "value": false - }, - { - "name": "switch_log", - "value": true - } - ], - "forRestore": false - } - } - - + "response": + { + "actionArguments": + [ + { "name": "preRestoreLogCatchup", "value": false }, + { "name": "switch_log", "value": true }, + ], + "forRestore": false, + }, + } - name: Check mode status assert: that: - result == expected_response - fail_msg: "Unable to create log catcup spec" + fail_msg: "Unable to create log catchup spec" success_msg: "log catchup spec created successfully" - - name: create spec for log catchup for restore check_mode: yes ntnx_ndb_database_log_catchup: @@ -322,34 +298,29 @@ register: result - set_fact: - expected_response: { + expected_response: + { "changed": false, "error": null, 
"failed": false, - "response": { - "actionArguments": [ - { - "name": "preRestoreLogCatchup", - "value": True - }, - { - "name": "switch_log", - "value": true - } - ], - "forRestore": true - } - } - + "response": + { + "actionArguments": + [ + { "name": "preRestoreLogCatchup", "value": True }, + { "name": "switch_log", "value": true }, + ], + "forRestore": true, + }, + } - name: Check mode status assert: that: - result == expected_response - fail_msg: "Unable to create log catcup spec" + fail_msg: "Unable to create log catchup spec" success_msg: "log catchup spec created successfully" - - name: perform log catchup ntnx_ndb_database_log_catchup: time_machine_uuid: "{{time_machine_uuid}}" @@ -377,32 +348,28 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "db_uuid": null, "error": null, "failed": false, - "response": { - "actionArguments": [ - { - "name": "sameLocation", - "value": true - } - ], + "response": + { + "actionArguments": [{ "name": "sameLocation", "value": true }], "latestSnapshot": null, "snapshotId": null, "timeZone": "UTC", - "userPitrTimestamp": "2023-01-02 11:02:22" - } - } + "userPitrTimestamp": "2023-01-02 11:02:22", + }, + } - name: Check mode status assert: that: - result == expected_result fail_msg: "Unable to create restore using pitr timestamp spec" - success_msg: "Spec for databas restore using pitr timetsmap created successfully" - + success_msg: "Spec for database restore using pitr timestamp created successfully" - name: create restore database spec with latest snapshot check_mode: yes @@ -411,34 +378,28 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "db_uuid": null, "error": null, "failed": false, - "response": { - "actionArguments": [ - { - "name": "sameLocation", - "value": true - } - ], + "response": + { + "actionArguments": [{ "name": "sameLocation", "value": true }], "latestSnapshot": true, "snapshotId": null, "timeZone": null, - 
"userPitrTimestamp": null - } - } - + "userPitrTimestamp": null, + }, + } - name: Check mode status assert: that: - result == expected_result fail_msg: "Unable to create restore using latest snapshot spec" - success_msg: "Spec for databas restore using latest snapshot created successfully" - - + success_msg: "Spec for database restore using latest snapshot created successfully" - name: create restore database spec using snapshot uuid check_mode: yes @@ -448,32 +409,28 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "db_uuid": null, "error": null, "failed": false, - "response": { - "actionArguments": [ - { - "name": "sameLocation", - "value": true - } - ], + "response": + { + "actionArguments": [{ "name": "sameLocation", "value": true }], "latestSnapshot": null, "snapshotId": "{{snapshot_uuid}}", "timeZone": null, - "userPitrTimestamp": null - } - } + "userPitrTimestamp": null, + }, + } - name: Check mode status assert: that: - result == expected_result fail_msg: "Unable to create restore using snapshot uuid spec" - success_msg: "Spec for databas restore using snapshot uuid created successfully" - + success_msg: "Spec for database restore using snapshot uuid created successfully" - name: perform restore using latest snapshot ntnx_ndb_database_restore: @@ -490,7 +447,6 @@ fail_msg: "Unable to restore database using latest snapshot" success_msg: "database restored successfully using latest snapshot" - - name: perform restore using snapshot uuid ntnx_ndb_database_restore: db_uuid: "{{db_uuid}}" @@ -519,33 +475,24 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "db_uuid": null, "error": null, "failed": false, - "response": { - "actionArguments": [ - { - "name": "working_dir", - "value": "/tmp" - }, - { - "name": "data_storage_size", - "value": 10 - }, - { - "name": "pre_script_cmd", - "value": "ls" - }, - { - "name": "post_script_cmd", - "value": "ls -a" - } - ], - 
"applicationType": "postgres_database" - } - } + "response": + { + "actionArguments": + [ + { "name": "working_dir", "value": "/tmp" }, + { "name": "data_storage_size", "value": 10 }, + { "name": "pre_script_cmd", "value": "ls" }, + { "name": "post_script_cmd", "value": "ls -a" }, + ], + "applicationType": "postgres_database", + }, + } - name: Check mode status assert: @@ -554,7 +501,6 @@ fail_msg: "Unable to create database scaling spec" success_msg: "Spec for database scaling with pre post commands created successfully" - - name: extend database storage for scaling database ntnx_ndb_database_scale: db_uuid: "{{db_uuid}}" @@ -575,7 +521,6 @@ ############################################ add / remove linked databases ########################################### - - name: create databases in database instance check_mode: yes ntnx_ndb_linked_databases: @@ -587,25 +532,22 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "db_instance_uuid": "{{db_uuid}}", "error": null, "failed": false, - "response": { - "databases": [ - { - "databaseName": "test1" - }, - { - "databaseName": "test2" - }, - { - "databaseName": "test3" - } - ] - } - } + "response": + { + "databases": + [ + { "databaseName": "test1" }, + { "databaseName": "test2" }, + { "databaseName": "test3" }, + ], + }, + } - name: Check mode status assert: @@ -614,7 +556,6 @@ fail_msg: "Unable to create spec for adding databases in database instance" success_msg: "Spec for adding databases in database instance created successfully" - - name: add databases in database instance ntnx_ndb_linked_databases: db_instance_uuid: "{{db_uuid}}" @@ -627,7 +568,7 @@ - name: create linked databases to its uuid map set_fact: - linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" + linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" loop: "{{result.response}}" no_log: true @@ -643,7 +584,6 @@ 
fail_msg: "Unable to add database to database instance" success_msg: "databases added to database instance successfully" - - name: remove databases in database instance ntnx_ndb_linked_databases: state: "absent" @@ -655,7 +595,7 @@ - name: create linked database map set_fact: - linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" + linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" loop: "{{result.response}}" no_log: true @@ -670,10 +610,8 @@ fail_msg: "Unable to remove database from database instance" success_msg: "linked database from database instance removed successfully" - ############################################ cleanup ########################################### - - name: delete database created earlier ntnx_ndb_databases: state: "absent" diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml index 26cc67f06..64c82eade 100644 --- a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml @@ -2,7 +2,6 @@ # Summary: # This playbook will test basic database flows - - debug: msg: "start ndb databases crud tests" @@ -17,7 +16,6 @@ ################################### Single instance postgres database tests ############################# - - name: create spec for single instance postgres database on new db server vm check_mode: yes ntnx_ndb_databases: @@ -83,110 +81,71 @@ register: result - set_fact: - expected_action_arguments: [ - { - "name": "dbserver_description", - "value": "vm for db server" - }, - { - "name": "listener_port", - "value": "9999" - }, - { - "name": "auto_tune_staging_drive", - "value": false - }, - { - "name": "allocate_pg_hugepage", - "value": True - }, - { - "name": "cluster_database", - "value": false - }, - { - "name": "auth_method", - "value": "md5" - }, - { - 
"name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "pre_create_script", - "value": "ls" - }, - { - "name": "post_create_script", - "value": "ls -a" - }, - { - "name": "database_names", - "value": "testAnsible" - }, - { - "name": "database_size", - "value": "200" - } - ] + expected_action_arguments: + [ + { "name": "dbserver_description", "value": "vm for db server" }, + { "name": "listener_port", "value": "9999" }, + { "name": "auto_tune_staging_drive", "value": false }, + { "name": "allocate_pg_hugepage", "value": True }, + { "name": "cluster_database", "value": false }, + { "name": "auth_method", "value": "md5" }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + { "name": "pre_create_script", "value": "ls" }, + { "name": "post_create_script", "value": "ls -a" }, + { "name": "database_names", "value": "testAnsible" }, + { "name": "database_size", "value": "200" }, + ] - set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": { - "continuousSchedule": { - "enabled": true, - "logBackupInterval": 30, - "snapshotsPerDay": 2 - }, - "monthlySchedule": { - "dayOfMonth": 4, - "enabled": true - }, - "quartelySchedule": { - "dayOfMonth": 4, - "enabled": true, - "startMonth": "JANUARY" - }, - "snapshotTimeOfDay": { - "hours": 11, - "minutes": 10, - "seconds": 2 - }, - "weeklySchedule": { - "dayOfWeek": "WEDNESDAY", - "enabled": true - } - }, - "slaId": "{{sla.uuid}}" - } + expected_time_machine_info: + { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": + { + "continuousSchedule": + { + "enabled": true, + "logBackupInterval": 30, + "snapshotsPerDay": 2, + }, + "monthlySchedule": { "dayOfMonth": 4, "enabled": true }, + "quartelySchedule": + { "dayOfMonth": 4, "enabled": true, "startMonth": "JANUARY" }, + "snapshotTimeOfDay": { "hours": 11, "minutes": 10, "seconds": 2 }, + "weeklySchedule": { 
"dayOfWeek": "WEDNESDAY", "enabled": true }, + }, + "slaId": "{{sla.uuid}}", + } - set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } + maintenance_tasks: + { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": + [ + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, + }, + "taskType": "OS_PATCHING", + }, + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, + }, + "taskType": "DB_PATCHING", + }, + ], + } - name: Check mode status assert: @@ -205,13 +164,11 @@ - result.response.nodes | length == 1 - result.response.nodeCount == 1 - result.response.nodes[0].nxClusterId == "{{cluster.cluster1.uuid}}" - - result.response.maintenanceTasks == mainetance_tasks + - result.response.maintenanceTasks == maintenance_tasks - result.response.createDbserver == True fail_msg: "Unable to create single instance postgres database provision spec" success_msg: "single instance postgres database provision spec created successfully" - - - name: create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true @@ -283,7 +240,7 @@ # - name: create properties map set_fact: - properties: "{{ properties | combine ({ item['name'] : item['value'] }) }}" + properties: "{{ properties | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true # @@ -332,7 +289,6 @@ ################################### update tests ############################# - - name: update database with check mode check_mode: yes ntnx_ndb_databases: @@ -380,11 +336,9 @@ - result.response.tags[0].tagName == "{{tags.databases.name}}" - 
result.response.tags[0].value == "single-instance-dbs-updated" - fail_msg: "Unable to update single instance postgres database" success_msg: "single instance postgres database updated successfully" - - name: idempotency checks ntnx_ndb_databases: wait: true @@ -427,8 +381,6 @@ fail_msg: "creation of spec for delete db from vm failed" success_msg: "spec for delete db from vm created successfully" - - - name: create spec for soft remove check_mode: yes ntnx_ndb_databases: @@ -451,7 +403,6 @@ fail_msg: "creation of spec for soft remove with time machine delete failed" success_msg: "spec for soft remove with time machine delete created successfully" - #####################################INFO Module tests####################################################### - debug: @@ -525,7 +476,6 @@ fail_msg: "Unable to Get era databases using its id" success_msg: "Get era databases using its id finished successfully" - ################################################################ - name: get era database with incorrect name @@ -546,7 +496,6 @@ ############################################################################################ - - name: unregister db along with delete time machine ntnx_ndb_databases: db_uuid: "{{db_uuid}}" @@ -564,7 +513,6 @@ fail_msg: "database delete failed" success_msg: "database deleted successfully" - - name: delete db server vm ntnx_ndb_db_server_vms: state: "absent" diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml index 464f71e61..ac0bcaa97 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml @@ -20,7 +20,6 @@ ################################### Single instance postgres database tests ############################# - - name: create spec for single instance postgres database on new db server vm 
check_mode: yes ntnx_ndb_databases: @@ -86,110 +85,71 @@ register: result - set_fact: - expected_action_arguments: [ - { - "name": "dbserver_description", - "value": "vm for db server" - }, - { - "name": "listener_port", - "value": "9999" - }, - { - "name": "auto_tune_staging_drive", - "value": false - }, - { - "name": "allocate_pg_hugepage", - "value": True - }, - { - "name": "cluster_database", - "value": false - }, - { - "name": "auth_method", - "value": "md5" - }, - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "pre_create_script", - "value": "ls" - }, - { - "name": "post_create_script", - "value": "ls -a" - }, - { - "name": "database_names", - "value": "testAnsible" - }, - { - "name": "database_size", - "value": "200" - } - ] + expected_action_arguments: + [ + { "name": "dbserver_description", "value": "vm for db server" }, + { "name": "listener_port", "value": "9999" }, + { "name": "auto_tune_staging_drive", "value": false }, + { "name": "allocate_pg_hugepage", "value": True }, + { "name": "cluster_database", "value": false }, + { "name": "auth_method", "value": "md5" }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + { "name": "pre_create_script", "value": "ls" }, + { "name": "post_create_script", "value": "ls -a" }, + { "name": "database_names", "value": "testAnsible" }, + { "name": "database_size", "value": "200" }, + ] - set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": { - "continuousSchedule": { - "enabled": true, - "logBackupInterval": 30, - "snapshotsPerDay": 2 - }, - "monthlySchedule": { - "dayOfMonth": 4, - "enabled": true - }, - "quartelySchedule": { - "dayOfMonth": 4, - "enabled": true, - "startMonth": "JANUARY" - }, - "snapshotTimeOfDay": { - "hours": 11, - "minutes": 10, - "seconds": 2 - }, - "weeklySchedule": { - "dayOfWeek": "WEDNESDAY", - "enabled": true - } - }, - "slaId": 
"{{sla.uuid}}" - } + expected_time_machine_info: + { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": + { + "continuousSchedule": + { + "enabled": true, + "logBackupInterval": 30, + "snapshotsPerDay": 2, + }, + "monthlySchedule": { "dayOfMonth": 4, "enabled": true }, + "quartelySchedule": + { "dayOfMonth": 4, "enabled": true, "startMonth": "JANUARY" }, + "snapshotTimeOfDay": { "hours": 11, "minutes": 10, "seconds": 2 }, + "weeklySchedule": { "dayOfWeek": "WEDNESDAY", "enabled": true }, + }, + "slaId": "{{sla.uuid}}", + } - set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } + maintenance_tasks: + { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": + [ + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, + }, + "taskType": "OS_PATCHING", + }, + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, + }, + "taskType": "DB_PATCHING", + }, + ], + } - name: Check mode status assert: @@ -208,13 +168,11 @@ - result.response.nodes | length == 1 - result.response.nodeCount == 1 - result.response.nodes[0].nxClusterId == "{{cluster.cluster1.uuid}}" - - result.response.maintenanceTasks == mainetance_tasks + - result.response.maintenanceTasks == maintenance_tasks - result.response.createDbserver == True fail_msg: "Unable to create single instance postgres database provision spec" success_msg: "single instance postgres database provision spec created successfully" - - - name: create single instance postgres database on new db server vm ntnx_ndb_databases: wait: true @@ -281,7 +239,7 @@ # {% raw %} - name: create properties map set_fact: - 
properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} @@ -330,7 +288,6 @@ ################################### update tests ############################# - - name: update database with check mode check_mode: yes ntnx_ndb_databases: @@ -378,11 +335,9 @@ - result.response.tags[0].tagName == "{{tags.databases.name}}" - result.response.tags[0].value == "single-instance-dbs-updated" - fail_msg: "Unable to update single instance postgres database" success_msg: "single instance postgres database updated successfully" - - name: idempotency checks ntnx_ndb_databases: wait: true @@ -425,8 +380,6 @@ fail_msg: "creation of spec for delete db from vm failed" success_msg: "spec for delete db from vm created successfully" - - - name: create spec for soft remove check_mode: yes ntnx_ndb_databases: @@ -449,7 +402,6 @@ fail_msg: "creation of spec for soft remove with time machine delete failed" success_msg: "spec for soft remove with time machine delete created successfully" - - name: unregister db along with delete time machine ntnx_ndb_databases: state: "absent" @@ -469,7 +421,6 @@ ################################### single instance postgres database registration tests ############################# - - name: create spec for registering previously unregistered database from previously created VM's ip check_mode: yes ntnx_ndb_register_database: @@ -519,86 +470,68 @@ register: result - set_fact: - expected_action_arguments: [ - { - "name": "listener_port", - "value": "9999" - }, - { - "name": "db_name", - "value": "testAnsible1" - }, - { - "name": "db_user", - "value": "postgres" - }, - { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - { - "name": "postgres_software_home", - "value": "{{postgres.software_home}}" - } - ] + expected_action_arguments: + [ + { 
"name": "listener_port", "value": "9999" }, + { "name": "db_name", "value": "testAnsible1" }, + { "name": "db_user", "value": "postgres" }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + { + "name": "postgres_software_home", + "value": "{{postgres.software_home}}", + }, + ] - set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": { - "continuousSchedule": { - "enabled": true, - "logBackupInterval": 30, - "snapshotsPerDay": 2 - }, - "monthlySchedule": { - "dayOfMonth": 4, - "enabled": true - }, - "quartelySchedule": { - "dayOfMonth": 4, - "enabled": true, - "startMonth": "JANUARY" - }, - "snapshotTimeOfDay": { - "hours": 11, - "minutes": 10, - "seconds": 2 - }, - "weeklySchedule": { - "dayOfWeek": "WEDNESDAY", - "enabled": true - } - }, - "slaId": "{{sla.uuid}}" - } + expected_time_machine_info: + { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": + { + "continuousSchedule": + { + "enabled": true, + "logBackupInterval": 30, + "snapshotsPerDay": 2, + }, + "monthlySchedule": { "dayOfMonth": 4, "enabled": true }, + "quartelySchedule": + { "dayOfMonth": 4, "enabled": true, "startMonth": "JANUARY" }, + "snapshotTimeOfDay": { "hours": 11, "minutes": 10, "seconds": 2 }, + "weeklySchedule": { "dayOfWeek": "WEDNESDAY", "enabled": true }, + }, + "slaId": "{{sla.uuid}}", + } - set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } + maintenance_tasks: + { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": + [ + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, + }, 
+ "taskType": "OS_PATCHING", + }, + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, + }, + "taskType": "DB_PATCHING", + }, + ], + } - name: Check mode status assert: @@ -612,13 +545,12 @@ - result.response.autoTuneStagingDrive == False - result.response.timeMachineInfo == expected_time_machine_info - result.response.vmIp == "10.10.10.10" - - result.response.maintenanceTasks == mainetance_tasks + - result.response.maintenanceTasks == maintenance_tasks - result.response.workingDirectory == "/check" fail_msg: "Unable to create register database spec" success_msg: "single instance postgres database register spec created successfully" - -- name: regsiter previously unregistered database from previously created VM +- name: register previously unregistered database from previously created VM ntnx_ndb_register_database: wait: true @@ -694,7 +626,6 @@ fail_msg: "Unable to register single instance postgres database" success_msg: "single instance postgres database registered successfully" - - set_fact: db_uuid: "{{result.db_uuid}}" #####################################INFO Module tests####################################################### @@ -770,7 +701,6 @@ fail_msg: "Unable to Get era databases using its id" success_msg: "Get era databases using its id finished successfully" - ################################################################ - name: get era database with incorrect name @@ -791,7 +721,6 @@ ############################################################################################ - - name: unregister db along with delete time machine ntnx_ndb_databases: db_uuid: "{{db_uuid}}" @@ -809,7 +738,6 @@ fail_msg: "database delete failed" success_msg: "database deleted successfully" - - name: delete db server vm ntnx_ndb_db_server_vms: state: "absent" diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml 
index f213c1b8d..43ae28849 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml @@ -53,7 +53,6 @@ - set_fact: _vm_ip: "{{ result.response.ipAddresses[0] }}" - - name: create new single instance postgres database on vm created earlier ntnx_ndb_databases: wait: true @@ -96,7 +95,7 @@ # {% raw %} - name: create properties map set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true @@ -128,8 +127,7 @@ fail_msg: "Unable to create single instance postgres database" success_msg: "single instance postgres database created successfully" - -- name: unregister db along with delete time machine and unregister db servr vm +- name: unregister db along with delete time machine and unregister db server vm ntnx_ndb_databases: state: "absent" db_uuid: "{{db_uuid}}" @@ -148,7 +146,6 @@ fail_msg: "database unregistration failed" success_msg: "database unregistered successfully" - - name: create spec for registering previously unregistered DB from previously unregistered DB server vm check_mode: yes ntnx_ndb_register_database: @@ -193,68 +190,57 @@ register: result +- set_fact: + expected_action_arguments: + [ + { "name": "vmIp", "value": "{{_vm_ip}}" }, + { "name": "listener_port", "value": "5432" }, + { "name": "db_name", "value": "testAnsible1" }, + { "name": "db_user", "value": "postgres" }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + { + "name": "postgres_software_home", + "value": "{{postgres.software_home}}", + }, + ] - set_fact: - expected_action_arguments: [ - { - "name": "vmIp", - "value": "{{_vm_ip}}" - }, - { - "name": "listener_port", - "value": "5432" - }, - { - "name": "db_name", - "value": "testAnsible1" - }, + 
expected_time_machine_info: + { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": {}, + "slaId": "{{sla.uuid}}", + } + +- set_fact: + maintenance_tasks: + { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": + [ + { + "payload": { - "name": "db_user", - "value": "postgres" + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, }, + "taskType": "OS_PATCHING", + }, + { + "payload": { - "name": "db_password", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, }, - { - "name": "postgres_software_home", - "value": "{{postgres.software_home}}" - } - ] - -- set_fact: - expected_time_machine_info: { - "autoTuneLogDrive": true, - "description": "TM-desc", - "name": "TM1", - "schedule": {}, - "slaId": "{{sla.uuid}}" - } - -- set_fact: - mainetance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } + "taskType": "DB_PATCHING", + }, + ], + } - name: Check mode status assert: @@ -272,13 +258,11 @@ - result.response.databaseType == "postgres_database" - result.response.timeMachineInfo == expected_time_machine_info - result.response.nxClusterId == cluster.cluster1.uuid - - result.response.maintenanceTasks == mainetance_tasks + - result.response.maintenanceTasks == maintenance_tasks - result.response.workingDirectory == "/tmp" fail_msg: "Unable to create register database spec" success_msg: "single instance postgres database register spec created successfully" - - - name: register previously unregistered DB from previously unregistered DB server vm ntnx_ndb_register_database: wait: true diff --git 
a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml index 7f3bf3844..afc6f1b53 100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml @@ -1,5 +1,4 @@ --- - - debug: msg: "start ntnx_ndb_db_server_vms, ntnx_ndb_register_db_server_vm, ntnx_ndb_db_servers_info and ntnx_ndb_maintenance_tasks tests. Approx Time: < 30 mins" @@ -53,73 +52,76 @@ # {% endraw %} - set_fact: - mainetance_tasks: { - "maintenanceWindowId": "test_window_uuid", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } + maintenance_tasks: + { + "maintenanceWindowId": "test_window_uuid", + "tasks": + [ + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, + }, + "taskType": "OS_PATCHING", + }, + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, + }, + "taskType": "DB_PATCHING", + }, + ], + } - set_fact: - expected_result: { + expected_result: + { "changed": false, "error": null, "failed": false, - "response": { - "actionArguments": [ + "response": + { + "actionArguments": + [ { - "name": "vm_name", - "value": "ansible-created-vm1-from-time-machine" + "name": "vm_name", + "value": "ansible-created-vm1-from-time-machine", }, { - "name": "client_public_key", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - } - ], + "name": "client_public_key", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + ], "computeProfileId": "test_compute_uuid", "databaseType": "postgres_database", "description": "ansible-created-vm1-from-time-machine-time-machine", "latestSnapshot": false, - "maintenanceTasks": { + "maintenanceTasks": + { 
"maintenanceWindowId": "test_window_uuid", - "tasks": [ + "tasks": + [ { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } + "payload": + { + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, }, - "taskType": "OS_PATCHING" + "taskType": "OS_PATCHING", }, { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } + "payload": + { + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, }, - "taskType": "DB_PATCHING" - } - ] - }, + "taskType": "DB_PATCHING", + }, + ], + }, "networkProfileId": "test_network_uuid", "nxClusterId": "test_cluster_uuid", "snapshotId": "test_snapshot_uuid", @@ -127,10 +129,10 @@ "softwareProfileVersionId": "", "timeMachineId": "test_uuid", "timeZone": "Asia/Calcutta", - "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - "uuid": null - } + "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + "uuid": null, + } - name: Check mode Status assert: @@ -139,7 +141,6 @@ fail_msg: "Unable to generate create db server vm spec with time machine as source" success_msg: "DB server VM spec created successfully" - - name: create spec for db server vm using software profile and names of profile check_mode: yes ntnx_ndb_db_server_vms: @@ -171,57 +172,57 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "error": null, "failed": false, - "response": { - "actionArguments": [ + "response": + { + "actionArguments": + [ + { "name": "vm_name", "value": "{{ vm1_name }}" }, { - "name": "vm_name", - "value": "{{ vm1_name }}" + "name": "client_public_key", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", }, - { - "name": "client_public_key", - "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - } - ], + ], "computeProfileId": "{{ compute_profile.uuid }}", "databaseType": "postgres_database", "description": "ansible-created-vm1-desc", "latestSnapshot": false, - "maintenanceTasks": { + "maintenanceTasks": + { 
"maintenanceWindowId": "{{ maintenance.window_uuid }}", - "tasks": [ + "tasks": + [ { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } + "payload": + { + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, }, - "taskType": "OS_PATCHING" + "taskType": "OS_PATCHING", }, { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } + "payload": + { + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, }, - "taskType": "DB_PATCHING" - } - ] - }, + "taskType": "DB_PATCHING", + }, + ], + }, "networkProfileId": "{{ network_profile.uuid }}", "nxClusterId": "{{ cluster.cluster1.uuid }}", "softwareProfileId": "{{ software_profile.uuid }}", "softwareProfileVersionId": "{{ software_profile.latest_version_id }}", "timeZone": "UTC", - "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" - }, - "uuid": null - } + "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + }, + "uuid": null, + } - name: Check mode Status assert: @@ -230,7 +231,6 @@ fail_msg: "Unable to generate create db server vm spec with time machine as source and given names of profile" success_msg: "DB server VM spec created successfully" - - name: create db server vm using software profile ntnx_ndb_db_server_vms: wait: True @@ -292,7 +292,6 @@ - set_fact: vm_ip: "{{ result.response.ipAddresses[0] }}" - ################################### DB server VM update Tests ############################# - name: update db server vm name, desc, credentials, tags @@ -330,12 +329,12 @@ - name: check idempotency ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" - name: "{{vm1_name_updated}}" - desc: "ansible-created-vm1-updated-desc" - tags: - ansible-db-server-vms: "ansible-updated" + wait: True + uuid: "{{db_server_uuid}}" + name: "{{vm1_name_updated}}" + desc: "ansible-created-vm1-updated-desc" + tags: + ansible-db-server-vms: "ansible-updated" register: result - name: check idempotency status @@ -347,7 
+346,6 @@ fail_msg: "db server vm got updated" success_msg: "db server vm update skipped successfully due to no changes in state" - - name: update db server vm name with check mode and check defaults check_mode: yes ntnx_ndb_db_server_vms: @@ -379,7 +377,6 @@ ntnx_ndb_db_servers_info: register: db_servers - - name: check listing status assert: that: @@ -518,7 +515,6 @@ fail_msg: "module didn't errored out correctly when incorrect name is given" success_msg: "module errored out correctly when incorrect name is given" - ################################### maintenance tasks update tests ############################# - name: create spec for adding maintenance window tasks to db server vm @@ -528,8 +524,8 @@ - name: "{{vm1_name_updated}}" - uuid: "test_vm_1" db_server_clusters: - - uuid: "test_cluter_1" - - uuid: "test_cluter_2" + - uuid: "test_cluster_1" + - uuid: "test_cluster_2" maintenance_window: name: "{{maintenance.window_name}}" tasks: @@ -542,45 +538,41 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "error": null, "failed": false, - "response": { - "entities": { - "ERA_DBSERVER": [ - "{{db_server_uuid}}", - "test_vm_1" - ], - "ERA_DBSERVER_CLUSTER": [ - "test_cluter_1", - "test_cluter_2" - ] - }, + "response": + { + "entities": + { + "ERA_DBSERVER": ["{{db_server_uuid}}", "test_vm_1"], + "ERA_DBSERVER_CLUSTER": ["test_cluster_1", "test_cluster_2"], + }, "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ + "tasks": + [ { - "payload": { - "prePostCommand": { - "postCommand": "ls", - "preCommand": "ls -a" - } + "payload": + { + "prePostCommand": + { "postCommand": "ls", "preCommand": "ls -a" }, }, - "taskType": "OS_PATCHING" + "taskType": "OS_PATCHING", }, { - "payload": { - "prePostCommand": { - "postCommand": "ls", - "preCommand": "ls -a" - } + "payload": + { + "prePostCommand": + { "postCommand": "ls", "preCommand": "ls -a" }, }, - "taskType": "DB_PATCHING" - } - ] - }, - "uuid": 
"{{maintenance.window_uuid}}" - } + "taskType": "DB_PATCHING", + }, + ], + }, + "uuid": "{{maintenance.window_uuid}}", + } - name: Check mode status assert: @@ -590,7 +582,6 @@ fail_msg: "Unable to create spec for adding maintenance tasks for db server vm" success_msg: "spec for adding maintenance tasks for db server vm created successfully" - - name: create spec for removing maintenance window tasks from above created vm check_mode: yes ntnx_ndb_maintenance_tasks: @@ -602,21 +593,19 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "error": null, "failed": false, - "response": { - "entities": { - "ERA_DBSERVER": [ - "{{db_server_uuid}}" - ] - }, + "response": + { + "entities": { "ERA_DBSERVER": ["{{db_server_uuid}}"] }, "maintenanceWindowId": "{{maintenance.window_uuid}}", "tasks": [], }, - "uuid": "{{maintenance.window_uuid}}" - } + "uuid": "{{maintenance.window_uuid}}", + } - name: Check mode status assert: @@ -626,7 +615,6 @@ fail_msg: "Unable to create spec for removing maintenance tasks for db server vm" success_msg: "spec for removing maintenance tasks for db server vm created successfully" - - name: db server vm already contains some tasks so remove maintenance window tasks from above created vm ntnx_ndb_maintenance_tasks: db_server_vms: @@ -662,8 +650,7 @@ fail_msg: "Unable to remove maintenance tasks for given db server vm" success_msg: "maintenance tasks for given db server vm removed successfully" - -- name: Add maitenance window task for vm +- name: Add maintenance window task for vm ntnx_ndb_maintenance_tasks: db_server_vms: - name: "{{vm1_name_updated}}" @@ -724,7 +711,6 @@ fail_msg: "Unable to remove maintenance tasks for given db server vm" success_msg: "maintenance tasks for given db server vm removed successfully" - ################################### DB server VM unregistration tests ############################# - name: generate check mode spec for unregister with default values @@ -749,8 +735,7 @@ 
fail_msg: "Unable to generate check mode spec for unregister" success_msg: "DB server VM unregister spec generated successfully" - -- name: genereate check mode spec for delete vm with vgs and snapshots +- name: generate check mode spec for delete vm with vgs and snapshots check_mode: yes ntnx_ndb_db_server_vms: state: "absent" @@ -774,7 +759,6 @@ fail_msg: "Unable to generate check mode spec for unregister" success_msg: "DB server VM update spec generated successfully" - - name: unregister vm ntnx_ndb_db_server_vms: state: "absent" @@ -797,8 +781,7 @@ ################################### DB server VM Registration tests ############################# - -- name: generate spec for registeration of the previous unregistered vm using check mode +- name: generate spec for registration of the previous unregistered vm using check mode check_mode: yes ntnx_ndb_register_db_server_vm: ip: "{{vm_ip}}" @@ -830,36 +813,36 @@ # {% raw %} - name: create action_arguments map set_fact: - action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" + action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.actionArguments}}" no_log: true # {% endraw %} - set_fact: - maintenance_tasks: { - "maintenanceWindowId": "{{maintenance.window_uuid}}", - "tasks": [ - { - "payload": { - "prePostCommand": { - "postCommand": "ls -a", - "preCommand": "ls" - } - }, - "taskType": "OS_PATCHING" - }, - { - "payload": { - "prePostCommand": { - "postCommand": "ls -F", - "preCommand": "ls -l" - } - }, - "taskType": "DB_PATCHING" - } - ] - } + maintenance_tasks: + { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": + [ + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -a", "preCommand": "ls" }, + }, + "taskType": "OS_PATCHING", + }, + { + "payload": + { + "prePostCommand": + { "postCommand": "ls -F", "preCommand": "ls -l" }, + }, + "taskType": "DB_PATCHING", + }, + ], 
+ } - name: Check mode status assert: @@ -879,7 +862,6 @@ fail_msg: "Unable to create spec for db server vm registration" success_msg: "DB server VM registration spec generated successfully" - - name: register the previous unregistered vm ntnx_ndb_register_db_server_vm: ip: "{{vm_ip}}" @@ -908,7 +890,7 @@ # {% raw %} - name: create properties map set_fact: - properties1: "{{ properties1 | default({}) | combine ({ item['name'] : item['value'] }) }}" + properties1: "{{ properties1 | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true # {% endraw %} @@ -934,13 +916,11 @@ fail_msg: "Unable to create db server vm using software profile" success_msg: "DB server VM created successfully" - - set_fact: db_server_uuid: "{{result.uuid}}" ################################### DB server VM Delete test ############################# - - name: unregister db server vm ntnx_ndb_db_server_vms: state: "absent" diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/readme.md b/tests/integration/targets/ntnx_ndb_maintenance_windows/readme.md index 8735ed118..a2e631b40 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/readme.md +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/readme.md @@ -1,3 +1,4 @@ ### Modules Tested: -1. ntnx_ndb_maitenance_window -2. ntnx_ndb_maitenance_windows_info + +1. ntnx_ndb_maintenance_window +2. 
ntnx_ndb_maintenance_windows_info diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml index efaa5eb49..8e6a4b4bb 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml @@ -1,7 +1,6 @@ --- - - debug: - msg: "start ndb database maintenance winndow tests" + msg: "start ndb database maintenance window tests" - name: Generate random name set_fact: @@ -16,7 +15,7 @@ check_mode: yes ntnx_ndb_maintenance_window: name: "{{window1_name}}" - desc: "anisble-created-window" + desc: "ansible-created-window" schedule: recurrence: "weekly" duration: 2 @@ -26,24 +25,27 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "error": null, "failed": false, - "response": { - "description": "anisble-created-window", + "response": + { + "description": "ansible-created-window", "name": "{{window1_name}}", - "schedule": { + "schedule": + { "dayOfWeek": "TUESDAY", "duration": 2, "recurrence": "WEEKLY", "startTime": "11:00:00", - "weekOfMonth": null - }, - "timezone": "Asia/Calcutta" - }, - "uuid": null - } + "weekOfMonth": null, + }, + "timezone": "Asia/Calcutta", + }, + "uuid": null, + } - name: Check mode status assert: @@ -52,11 +54,10 @@ fail_msg: "Unable to create spec for creating window" success_msg: "spec for maintenance window generated successfully" - - name: create window with weekly schedule ntnx_ndb_maintenance_window: name: "{{window1_name}}" - desc: "anisble-created-window" + desc: "ansible-created-window" schedule: recurrence: "weekly" duration: 2 @@ -77,7 +78,7 @@ - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - result.response.name == window1_name - - result.response.description == "anisble-created-window" + - result.response.description == "ansible-created-window" - 
result.response.schedule.dayOfWeek == "TUESDAY" - result.response.schedule.recurrence == "WEEKLY" - result.response.schedule.startTime == "11:00:00" @@ -88,11 +89,10 @@ fail_msg: "Unable to create maintenance window with weekly schedule" success_msg: "maintenance window with weekly schedule created successfully" - - name: create window with monthly schedule ntnx_ndb_maintenance_window: name: "{{window2_name}}" - desc: "anisble-created-window" + desc: "ansible-created-window" schedule: recurrence: "monthly" duration: 2 @@ -115,7 +115,7 @@ - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - result.response.name == window2_name - - result.response.description == "anisble-created-window" + - result.response.description == "ansible-created-window" - result.response.schedule.dayOfWeek == "TUESDAY" - result.response.schedule.recurrence == "MONTHLY" - result.response.schedule.startTime == "11:00:00" @@ -123,7 +123,6 @@ - result.response.schedule.weekOfMonth == 2 - result.response.schedule.duration == 2 - fail_msg: "Unable to create maintenance window with monthly schedule" success_msg: "maintenance window with monthly schedule created successfully" @@ -163,12 +162,11 @@ ############################################## update tests #################################### - - name: update window schedule ntnx_ndb_maintenance_window: uuid: "{{window2_uuid}}" name: "{{window2_name}}-updated" - desc: "anisble-created-window-updated" + desc: "ansible-created-window-updated" schedule: recurrence: "monthly" duration: 3 @@ -187,7 +185,7 @@ - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - result.response.name == "{{window2_name}}-updated" - - result.response.description == "anisble-created-window-updated" + - result.response.description == "ansible-created-window-updated" - result.response.schedule.dayOfWeek == "WEDNESDAY" - result.response.schedule.recurrence == "MONTHLY" - 
result.response.schedule.startTime == "12:00:00" @@ -195,7 +193,6 @@ - result.response.schedule.weekOfMonth == 3 - result.response.schedule.duration == 3 - fail_msg: "Unable to update maintenance window" success_msg: "maintenance window updated successfully" @@ -220,7 +217,7 @@ - result.uuid is defined - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" - result.response.name == "{{window2_name}}-updated" - - result.response.description == "anisble-created-window-updated" + - result.response.description == "ansible-created-window-updated" - result.response.schedule.dayOfWeek == "WEDNESDAY" - result.response.schedule.recurrence == "WEEKLY" - result.response.schedule.startTime == "12:00:00" @@ -228,7 +225,6 @@ - result.response.schedule.weekOfMonth == None - result.response.schedule.duration == 3 - fail_msg: "Unable to update maintenance window" success_msg: "maintenance window updated successfully" @@ -236,7 +232,7 @@ ntnx_ndb_maintenance_window: uuid: "{{window2_uuid}}" name: "{{window2_name}}-updated" - desc: "anisble-created-window-updated" + desc: "ansible-created-window-updated" schedule: recurrence: "weekly" duration: 3 @@ -263,7 +259,6 @@ register: result - - name: update status assert: that: @@ -280,7 +275,6 @@ - result.response.schedule.weekOfMonth == None - result.response.schedule.duration == 3 - fail_msg: "Unable to update maintenance window" success_msg: "maintenance window updated successfully" @@ -312,7 +306,6 @@ fail_msg: "Unable to update maintenance window" success_msg: "maintenance window updated successfully" - ############################################## delete tests #################################### - name: delete window 1 @@ -336,7 +329,6 @@ state: "absent" register: result - - name: check delete status assert: that: diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml index 081e0e8f9..8bbe06617 100644 --- 
a/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml @@ -147,7 +147,7 @@ fail_msg: "Fail: unable to verify unpublish flow in compute profile " success_msg: "Pass: verify unpublish flow in compute profile finished successfully" ################################################################ -- name: Delete all created cmpute profiles +- name: Delete all created compute profiles ntnx_ndb_profiles: state: absent profile_uuid: "{{ item }}" diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml index 69c8634a8..8f4a0165b 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml @@ -23,7 +23,7 @@ autovacuum_vacuum_scale_factor: 0.3 autovacuum_work_mem: 1 autovacuum_max_workers: 2 - autovacuum_vacuum_cost_delay: 22 + autovacuum_vacuum_cost_delay: 22 wal_buffers: 1 synchronous_commit: local random_page_cost: 3 @@ -61,14 +61,13 @@ autovacuum_vacuum_scale_factor: "{{autovacuum_vacuum_scale_factor}}" autovacuum_work_mem: "{{autovacuum_work_mem}}" autovacuum_max_workers: "{{autovacuum_max_workers}}" - autovacuum_vacuum_cost_delay: "{{autovacuum_vacuum_cost_delay}}" + autovacuum_vacuum_cost_delay: "{{autovacuum_vacuum_cost_delay}}" wal_buffers: "{{wal_buffers}}" synchronous_commit: "{{synchronous_commit}}" random_page_cost: "{{random_page_cost}}" register: result ignore_errors: true - - name: check listing status assert: that: @@ -151,7 +150,6 @@ register: result ignore_errors: true - - name: check listing status assert: that: @@ -162,7 +160,7 @@ fail_msg: "Fail: verify unpublish flow in database_parameter profile " success_msg: "Pass: verify unpublish flow in database_parameter profile finished successfully " ################################################################ -- name: verify creatition of db params profile with 
defaults +- name: verify creation of db params profile with defaults ntnx_ndb_profiles: name: "{{profile3_name}}" desc: "testdesc" @@ -181,8 +179,8 @@ - result.response.description == "testdesc" - result.response.type == "Database_Parameter" - result.response.versions is defined - fail_msg: "Fail: Unable to verify creatition of db params profile with defaults " - success_msg: "Pass: verify creatition of db params profile with defaults finished successfully " + fail_msg: "Fail: Unable to verify creation of db params profile with defaults " + success_msg: "Pass: verify creation of db params profile with defaults finished successfully " - set_fact: todelete: "{{ todelete + [ result.profile_uuid ] }}" diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml index 6ed797ebc..87d84ab9e 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml @@ -20,8 +20,7 @@ network: topology: single vlans: - - - cluster: + - cluster: name: "{{network_profile.single.cluster.name}}" vlan_name: "{{network_profile.single.vlan_name}}" enable_ip_address_selection: true @@ -128,7 +127,7 @@ # - result.response.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" # - result.response.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" # fail_msg: "Fail: unable to verify create of multiple cluster network profile " -# success_msg: "Pass: verify create of multiple cluster network profile finished sucessfully" +# success_msg: "Pass: verify create of multiple cluster network profile finished successfully" # - set_fact: # todelete: "{{ todelete + [ result.profile_uuid ] }}" diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml index 
aef0c0daf..2879a64f8 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml @@ -17,11 +17,9 @@ - set_fact: profile1_name: "{{random_name[0]}}" - profile1_name_updated: "{{random_name[0]}}-updated" + profile1_name_updated: "{{random_name[0]}}-updated" profile2_name: "{{random_name[0]}}2" - - - name: create software profile create spec check_mode: yes ntnx_ndb_profiles: @@ -43,49 +41,39 @@ - uuid: "{{cluster.cluster2.uuid}}" register: result - - - set_fact: - expected_result: { + expected_result: + { "changed": false, "error": null, "failed": false, "profile_uuid": null, - "response": { - "availableClusterIds": [ - "{{cluster.cluster1.uuid}}", - "{{cluster.cluster2.uuid}}" - ], + "response": + { + "availableClusterIds": + ["{{cluster.cluster1.uuid}}", "{{cluster.cluster2.uuid}}"], "description": "{{profile1_name}}-desc", "engineType": "postgres_database", "name": "{{profile1_name}}", - "properties": [ - { - "name": "BASE_PROFILE_VERSION_NAME", - "value": "v1.0" - }, + "properties": + [ + { "name": "BASE_PROFILE_VERSION_NAME", "value": "v1.0" }, { - "name": "BASE_PROFILE_VERSION_DESCRIPTION", - "value": "v1.0-desc" + "name": "BASE_PROFILE_VERSION_DESCRIPTION", + "value": "v1.0-desc", }, + { "name": "OS_NOTES", "value": "os_notes" }, + { "name": "DB_SOFTWARE_NOTES", "value": "db_notes" }, { - "name": "OS_NOTES", - "value": "os_notes" + "name": "SOURCE_DBSERVER_ID", + "value": "{{db_server_vm.uuid}}", }, - { - "name": "DB_SOFTWARE_NOTES", - "value": "db_notes" - }, - { - "name": "SOURCE_DBSERVER_ID", - "value": "{{db_server_vm.uuid}}" - } - ], + ], "systemProfile": false, "topology": "cluster", - "type": "Software" - } - } + "type": "Software", + }, + } - name: check spec for creating software profile assert: @@ -115,8 +103,6 @@ - uuid: "{{cluster.cluster2.uuid}}" register: result - - - set_fact: clusters: ["{{cluster.cluster1.uuid}}", "{{cluster.cluster2.uuid}}"] @@ 
-142,7 +128,6 @@ fail_msg: "Fail: Unable to create software profile with base version and cluster instance topology with replicating to multiple clusters." success_msg: "Pass: Software profile with base version, cluster instance topology and replicated to multiple clusters created successfully" - - name: create software profile with base version and single instance topology ntnx_ndb_profiles: name: "{{profile2_name}}" @@ -162,8 +147,6 @@ - name: "{{cluster.cluster1.name}}" register: result - - - name: check status of creation assert: that: @@ -185,7 +168,6 @@ fail_msg: "Fail: Unable to create software profile with base version and single instance topology" success_msg: "Pass: Software profile with base version and single instance topology created successfully" - - set_fact: profile_uuid: "{{result.profile_uuid}}" @@ -196,8 +178,6 @@ desc: "{{profile1_name}}-desc-updated" register: result - - - name: check status of creation assert: that: @@ -212,7 +192,6 @@ fail_msg: "Fail: Unable to update software profile" success_msg: "Pass: Software profile updated successfully" - - name: idempotency checks ntnx_ndb_profiles: profile_uuid: "{{profile_uuid}}" @@ -220,8 +199,6 @@ desc: "{{profile1_name}}-desc-updated" register: result - - - name: check status of creation assert: that: @@ -233,7 +210,7 @@ - result.response.profile.name == "{{profile1_name}}-updated1" - result.response.profile.description == "{{profile1_name}}-desc-updated" - fail_msg: "Fail: Update didnt get skipped due to no state changes" + fail_msg: "Fail: Update did not get skipped due to no state changes" success_msg: "Pass: Update skipped successfully due to no state changes" - name: create software profile version spec @@ -253,42 +230,41 @@ register: result - set_fact: - expected_result: { + expected_result: + { "changed": false, "error": null, "failed": false, "profile_type": "software", "profile_uuid": "{{profile_uuid}}", - "response": { - "profile": { + "response": + { + "profile": + { "description": 
"{{profile1_name}}-desc-updated", "engineType": "postgres_database", - "name": "{{profile1_name}}-updated1" - }, - "version": { + "name": "{{profile1_name}}-updated1", + }, + "version": + { "description": "v2.0-desc", "engineType": "postgres_database", "name": "v2.0", - "properties": [ - { - "name": "OS_NOTES", - "value": "os_notes for v2" - }, + "properties": + [ + { "name": "OS_NOTES", "value": "os_notes for v2" }, + { "name": "DB_SOFTWARE_NOTES", "value": "db_notes for v2" }, { - "name": "DB_SOFTWARE_NOTES", - "value": "db_notes for v2" + "name": "SOURCE_DBSERVER_ID", + "value": "{{db_server_vm.uuid}}", }, - { - "name": "SOURCE_DBSERVER_ID", - "value": "{{db_server_vm.uuid}}" - } - ], + ], "systemProfile": false, "topology": null, - "type": "Software" - } - } - } + "type": "Software", + }, + }, + } - name: check spec for creating spec for software profile version assert: @@ -298,7 +274,6 @@ fail_msg: "Fail: Unable to create spec for software profile version create" success_msg: "Pass: Spec for creating software profile version generated successfully" - - name: create software profile version ntnx_ndb_profiles: profile_uuid: "{{profile_uuid}}" @@ -314,8 +289,6 @@ register: result - - - name: check status of version create assert: that: @@ -349,8 +322,6 @@ register: result - - - name: check status of spec assert: that: @@ -366,7 +337,6 @@ fail_msg: "Fail: Unable to create spec for updating software profile version" success_msg: "Pass: Spec for updating software profile version created successfully" - - name: update software profile version ntnx_ndb_profiles: profile_uuid: "{{profile_uuid}}" @@ -378,8 +348,6 @@ register: result - - - name: check status of update assert: that: @@ -401,7 +369,6 @@ fail_msg: "Fail: Unable to update software profile version" success_msg: "Pass: Software profile version updated successfully" - - set_fact: version_uuid: "{{result.version_uuid}}" @@ -413,8 +380,6 @@ publish: True register: result - - - name: check status of update assert: 
that: @@ -455,7 +420,6 @@ fail_msg: "Fail: Unable to unpublish software profile version" success_msg: "Pass: Software version unpublished successfully" - - name: deprecate software profile version ntnx_ndb_profiles: profile_uuid: "{{profile_uuid}}" @@ -464,8 +428,6 @@ deprecate: True register: result - - - name: check status of update assert: that: @@ -482,8 +444,6 @@ fail_msg: "Fail: Unable to deprecate software profile version" success_msg: "Pass: Software version deprecated successfully" - - - name: delete software profile version ntnx_ndb_profiles: profile_uuid: "{{profile_uuid}}" @@ -492,7 +452,6 @@ state: "absent" register: result - - name: check status of update assert: that: @@ -506,7 +465,6 @@ fail_msg: "Fail: Unable to delete software profile version" success_msg: "Pass: Software version deleted successfully" - - name: replicate software profile ntnx_ndb_profiles: profile_uuid: "{{profile_uuid}}" @@ -518,7 +476,6 @@ ansible.builtin.pause: minutes: 3 - - set_fact: clusters: {} @@ -551,7 +508,6 @@ state: "absent" register: result - - name: check status of delete assert: that: diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml index adcdcc300..9a4aaa3f1 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml @@ -413,7 +413,7 @@ ################################################################ -- name: Delete all created vlan's +- name: Delete all created vlans ntnx_ndb_vlans: state: absent vlan_uuid: "{{ item }}" @@ -429,6 +429,6 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created vlan's" - success_msg: "All vlan'sdeleted successfully" + success_msg: "All vlans deleted successfully" - set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml 
b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml index ad41fd7eb..d580388d1 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml @@ -1,16 +1,15 @@ --- - debug: - msg: Start negative secanrios ntnx_ndb_vlans + msg: Start negative scenarios ntnx_ndb_vlans - name: create Dhcp ndb vlan with static Configuration ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ndb_vlan.name}}" vlan_type: DHCP gateway: "{{ndb_vlan.gateway}}" subnet_mask: "{{ndb_vlan.subnet_mask}}" ip_pools: - - - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" primary_dns: "{{ndb_vlan.primary_dns}}" secondary_dns: "{{ndb_vlan.secondary_dns}}" @@ -26,11 +25,11 @@ - result.failed == true - result.msg == "Failed generating create vlan spec" fail_msg: "fail: create Dhcp ndb vlan with static Configuration finished successfully" - success_msg: "pass: Returnerd error as expected" + success_msg: "pass: Returned error as expected" # ############################### - name: create static ndb vlan with missing Configuration ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ndb_vlan.name}}" vlan_type: Static gateway: "{{ndb_vlan.gateway}}" register: result @@ -44,12 +43,12 @@ - result.failed == true - result.msg == "Failed generating create vlan spec" fail_msg: "fail: create static ndb vlan with missing Configuration finished successfully" - success_msg: "pass: Returnerd error as expected" + success_msg: "pass: Returned error as expected" ########### - name: create Dhcp ndb vlan ntnx_ndb_vlans: - name: "{{ndb_vlan.name}}" + name: "{{ndb_vlan.name}}" vlan_type: DHCP cluster: uuid: "{{cluster.cluster2.uuid}}" @@ -80,11 +79,9 @@ gateway: "{{ndb_vlan.gateway}}" subnet_mask: "{{ndb_vlan.subnet_mask}}" ip_pools: - - - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + - start_ip: 
"{{ndb_vlan.ip_pools.0.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" - - - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" primary_dns: "{{ndb_vlan.primary_dns}}" secondary_dns: "{{ndb_vlan.secondary_dns}}" @@ -100,11 +97,11 @@ - result.failed == true - result.msg == "Failed generating update vlan spec" fail_msg: "fail: update dhcp ndb vlan with static Configuration finished successfully" - success_msg: "pass: Returnerd error as expected" + success_msg: "pass: Returned error as expected" ################################## -- name: Delete all created vlan's +- name: Delete all created vlan ntnx_ndb_vlans: state: absent vlan_uuid: "{{ item }}" @@ -120,7 +117,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created vlan's" - success_msg: "All vlan'sdeleted successfully" + success_msg: "All vlans deleted successfully" - set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml index 685690e4a..f9dea0812 100644 --- a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml +++ b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml @@ -1,12 +1,12 @@ - debug: msg: Start testing create ova for vm -- name: VM with minimum requiremnts +- name: VM with minimum requirements ntnx_vms: - state: present - name: integration_test_ova_vm - cluster: - name: "{{ cluster.name }}" + state: present + name: integration_test_ova_vm + cluster: + name: "{{ cluster.name }}" register: vm ignore_errors: true @@ -15,14 +15,14 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VM with minimum requiremnts ' - success_msg: 'Success: VM with minimum requiremnts created successfully ' + fail_msg: "Fail: Unable to create VM with minimum requirements " + success_msg: "Success: VM with minimum requirements 
created successfully " ######################################### - name: create_ova_image with check mode ntnx_vms_ova: - src_vm_uuid: "{{ vm.vm_uuid }}" - name: integration_test_VMDK_ova - file_format: VMDK + src_vm_uuid: "{{ vm.vm_uuid }}" + name: integration_test_VMDK_ova + file_format: VMDK register: result ignore_errors: true check_mode: yes @@ -34,14 +34,14 @@ - result.changed == false - result.failed == false - result.task_uuid != "" - success_msg: ' Success: returned as expected ' - fail_msg: ' Fail: create_ova_image with check mode ' + success_msg: " Success: returned as expected " + fail_msg: " Fail: create_ova_image with check mode " ######################################### - name: create QCOW2 ova_image ntnx_vms_ova: - src_vm_uuid: "{{ vm.vm_uuid }}" - name: integration_test_QCOW2_ova - file_format: QCOW2 + src_vm_uuid: "{{ vm.vm_uuid }}" + name: integration_test_QCOW2_ova + file_format: QCOW2 register: result ignore_errors: true @@ -50,14 +50,14 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create QCOW2 ova_image ' - success_msg: 'Success: create QCOW2 ova_image successfully ' + fail_msg: "Fail: Unable to create QCOW2 ova_image " + success_msg: "Success: create QCOW2 ova_image successfully " ######################################### - name: create VMDK ova_image ntnx_vms_ova: - src_vm_uuid: "{{ vm.vm_uuid }}" - name: integration_test_VMDK_ova - file_format: VMDK + src_vm_uuid: "{{ vm.vm_uuid }}" + name: integration_test_VMDK_ova + file_format: VMDK register: result ignore_errors: true @@ -66,8 +66,8 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VMDK ova_image ' - success_msg: 'Success: create VMDK ova_image successfully ' + fail_msg: "Fail: Unable to create VMDK ova_image " + success_msg: "Success: create VMDK ova_image successfully " ######################################### - name: Delete all Created VMs 
ntnx_vms: diff --git a/tests/integration/targets/ntnx_projects/tasks/create_project.yml b/tests/integration/targets/ntnx_projects/tasks/create_project.yml index 220cc38b1..c4265c4dc 100644 --- a/tests/integration/targets/ntnx_projects/tasks/create_project.yml +++ b/tests/integration/targets/ntnx_projects/tasks/create_project.yml @@ -136,7 +136,7 @@ todelete: "{{ todelete + [ result.project_uuid ] }}" ################################################################# -- name: Create Project with alredy existing project name +- name: Create Project with already existing project name ntnx_projects: name: "{{ project.name }}" register: result diff --git a/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml index 2dd21fe6a..ed85b7b80 100644 --- a/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml +++ b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml @@ -54,7 +54,7 @@ ################################################################ -- name: Creat project with all specs +- name: Create project with all specs ntnx_projects: name: "{{project2_name}}" desc: desc-123 @@ -96,7 +96,8 @@ ignore_errors: "{{ignore_errors}}" - set_fact: - expected_subnets: ["{{ network.dhcp.uuid }}", "{{ static.uuid }}", "{{ overlay.uuid }}"] + expected_subnets: + ["{{ network.dhcp.uuid }}", "{{ static.uuid }}", "{{ overlay.uuid }}"] response_acp: "{{result.response.status.access_control_policy_list_status[0].access_control_policy_status.resources}}" - name: Creation Status @@ -124,14 +125,12 @@ fail_msg: "Unable to create project with all specifications" success_msg: "Project with all specifications created successfully" - - set_fact: todelete: "{{ todelete + [ result.project_uuid ] }}" - set_fact: user_group_to_delete: "{{result.response.status.project_status.resources.external_user_group_reference_list[0].uuid}}" - - name: Update 
Project role mappings and subnets and quotas ntnx_projects: project_uuid: "{{result.project_uuid}}" @@ -171,38 +170,42 @@ - set_fact: response_acp: "{{result.response.status.access_control_policy_list_status[0].access_control_policy_status.resources}}" - set_fact: - acp_users: ["{{response_acp.user_reference_list[0].uuid}}", "{{response_acp.user_reference_list[1].uuid}}"] + acp_users: + [ + "{{response_acp.user_reference_list[0].uuid}}", + "{{response_acp.user_reference_list[1].uuid}}", + ] - set_fact: - sorted_acp_users: '{{ acp_users | sort() }}' + sorted_acp_users: "{{ acp_users | sort() }}" - set_fact: expected_users: ["{{users[0]}}", "{{users[1]}}"] - set_fact: - expected_users_sorted: '{{ expected_users | sort() }}' + expected_users_sorted: "{{ expected_users | sort() }}" - set_fact: - project_user_reference_list: ["{{result.response.status.project_status.resources.user_reference_list[0].uuid}}", "{{result.response.status.project_status.resources.user_reference_list[1].uuid}}"] + project_user_reference_list: + [ + "{{result.response.status.project_status.resources.user_reference_list[0].uuid}}", + "{{result.response.status.project_status.resources.user_reference_list[1].uuid}}", + ] - set_fact: - project_user_references_sorted: '{{ project_user_reference_list|sort() }}' + project_user_references_sorted: "{{ project_user_reference_list|sort() }}" - set_fact: - expected_quotas: [ - { - "limit": 5, - "resource_type": "VCPUS", - "units": "COUNT", - "value": 0 - }, - { - "limit": 2147483648, - "resource_type": "STORAGE", - "units": "BYTES", - "value": 0 - }, - { - "limit": 2147483648, - "resource_type": "MEMORY", - "units": "BYTES", - "value": 0 - } - ] + expected_quotas: + [ + { "limit": 5, "resource_type": "VCPUS", "units": "COUNT", "value": 0 }, + { + "limit": 2147483648, + "resource_type": "STORAGE", + "units": "BYTES", + "value": 0, + }, + { + "limit": 2147483648, + "resource_type": "MEMORY", + "units": "BYTES", + "value": 0, + }, + ] - set_fact: quotas: 
"{{result.response.status.project_status.resources.resource_domain.resources}}" @@ -276,10 +279,9 @@ that: - result.changed == false - "'Nothing to update' in result.msg" - fail_msg: "Project update didnt got skipped for update spec same as existing project" + fail_msg: "Project update did not got skipped for update spec same as existing project" success_msg: "Project got skipped successfully for no change in spec" - - name: Create project with existing name ntnx_projects: name: "{{project3_name}}" @@ -296,13 +298,12 @@ register: result ignore_errors: true - - name: Creation Status assert: that: - result.changed == false - "'Project with given name already exists' in result.msg" - fail_msg: "Project creation didnt failed for existing name" + fail_msg: "Project creation did not failed for existing name" success_msg: "Project creation failed as expected" ################################################################# @@ -332,4 +333,4 @@ - result.changed == true - result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" fail_msg: "Unable to delete user group " - success_msg: "user group deletd successfully" + success_msg: "user group deleted successfully" diff --git a/tests/integration/targets/ntnx_projects/tasks/update_project.yml b/tests/integration/targets/ntnx_projects/tasks/update_project.yml index 4d88442d1..6557f5384 100644 --- a/tests/integration/targets/ntnx_projects/tasks/update_project.yml +++ b/tests/integration/targets/ntnx_projects/tasks/update_project.yml @@ -12,7 +12,6 @@ - set_fact: project1_name: "{{random_name}}{{suffix_name}}1" - - name: Create Project ntnx_projects: name: "{{project1_name}}" @@ -154,7 +153,7 @@ that: - result.changed == false - "'Nothing to update' in result.msg" - fail_msg: "Project update didnt got skipped for update spec same as existing project" + fail_msg: "Project update did not got skipped for update spec same as existing project" success_msg: "Project got skipped successfully for no 
change in spec" ################################################################# diff --git a/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml b/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml index 0c2d9e7ce..b03931262 100644 --- a/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml +++ b/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml @@ -107,7 +107,7 @@ success_msg: "Protection policy with with synchronous schedule created successfully" -- name: Delete created protection policy inorder to avoid conflict in further tests +- name: Delete created protection policy in order to avoid conflict in further tests ntnx_protection_rules: state: absent wait: True diff --git a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml index 6a35bc2c2..75504ef2b 100644 --- a/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml +++ b/tests/integration/targets/ntnx_recovery_plans_and_jobs/tasks/crud.yml @@ -5,166 +5,163 @@ ############################################################### CREATE Recovery Plan ########################################################################################### - set_fact: - expected_availability_zone_list: [ + expected_availability_zone_list: + [ + { "availability_zone_url": "{{dr.primary_az_url}}" }, + { "availability_zone_url": "{{dr.recovery_az_url}}" }, + ] + expected_network_mapping_list_for_check_mode: + [ + { + "are_networks_stretched": True, + "availability_zone_network_mapping_list": + [ { - "availability_zone_url": "{{dr.primary_az_url}}" + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_network": { "name": "{{network.dhcp.name}}" }, + "test_network": { "name": "{{network.dhcp.name}}" }, }, { - "availability_zone_url": "{{dr.recovery_az_url}}" - } - ] - expected_network_mapping_list_for_check_mode: [ - { - 
"are_networks_stretched": True, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_network": { - "name": "{{network.dhcp.name}}" - }, - "test_network": { - "name": "{{network.dhcp.name}}" - } - }, - { - "availability_zone_url": "{{dr.recovery_az_url}}", - "recovery_network": { - "name": "{{dr.recovery_site_network}}" - }, - "test_network": { - "name": "{{dr.recovery_site_network}}" - } - } - ] - } - ] - expected_network_mapping_list: [ - { - "are_networks_stretched": False, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_ip_assignment_list": [ - { - "ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip2}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "recovery_network": { - "name": "{{network.dhcp.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_ip_assignment_list": [ - { - "ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip1}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "test_network": { - "name": "{{network.dhcp.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - } - }, - { - "availability_zone_url": "{{dr.recovery_az_url}}", - "recovery_ip_assignment_list": [ - { - "ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip2}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "recovery_network": { - "name": "{{dr.recovery_site_network}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_ip_assignment_list": [ - { - 
"ip_config_list": [ - { - "ip_address": "{{dr.recovery_ip1}}" - } - ], - "vm_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - } - } - ], - "test_network": { - "name": "{{dr.recovery_site_network}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{dr.gateway_ip}}", - "prefix_length": 24 - } - ] - } - } - ] - } - ] - expected_stage_work_0: { - "recover_entities": { - "entity_info_list": [ - { - "any_entity_reference": { - "kind": "vm", - "name": "{{dr_vm_name}}", - "uuid": "{{dr_vm.uuid}}" - }, - "script_list": [ - { - "enable_script_exec": true - } - ] - } - ] - } - } + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_network": { "name": "{{dr.recovery_site_network}}" }, + "test_network": { "name": "{{dr.recovery_site_network}}" }, + }, + ], + }, + ] + expected_network_mapping_list: + [ + { + "are_networks_stretched": False, + "availability_zone_network_mapping_list": + [ + { + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_ip_assignment_list": + [ + { + "ip_config_list": + [{ "ip_address": "{{dr.recovery_ip2}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + }, + ], + "recovery_network": + { + "name": "{{network.dhcp.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + "test_ip_assignment_list": + [ + { + "ip_config_list": + [{ "ip_address": "{{dr.recovery_ip1}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + }, + ], + "test_network": + { + "name": "{{network.dhcp.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + }, + { + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_ip_assignment_list": + [ + { + "ip_config_list": + [{ 
"ip_address": "{{dr.recovery_ip2}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + }, + ], + "recovery_network": + { + "name": "{{dr.recovery_site_network}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + "test_ip_assignment_list": + [ + { + "ip_config_list": + [{ "ip_address": "{{dr.recovery_ip1}}" }], + "vm_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + }, + ], + "test_network": + { + "name": "{{dr.recovery_site_network}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{dr.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + }, + ], + }, + ] + expected_stage_work_0: + { + "recover_entities": + { + "entity_info_list": + [ + { + "any_entity_reference": + { + "kind": "vm", + "name": "{{dr_vm_name}}", + "uuid": "{{dr_vm.uuid}}", + }, + "script_list": [{ "enable_script_exec": true }], + }, + ], + }, + } - name: Create checkmode spec for recovery plan with networks and 2 stage check_mode: yes @@ -195,7 +192,6 @@ name: "{{dr.recovery_site_network}}" register: result - - name: Checkmode spec assert assert: that: @@ -209,8 +205,8 @@ - result.response.spec.resources.stage_list[0]["stage_work"] == expected_stage_work_0 - result.response.spec.resources.parameters.availability_zone_list == expected_availability_zone_list - result.response.spec.resources.parameters.network_mapping_list == expected_network_mapping_list_for_check_mode - fail_msg: 'Unable to create recovery plan check mode spec' - success_msg: 'Recovery plan check mode spec created successfully' + fail_msg: "Unable to create recovery plan check mode spec" + success_msg: "Recovery plan check mode spec created successfully" - name: Create recovery plan with networks and 2 stage ntnx_recovery_plans: @@ -284,135 +280,124 @@ - 
result.response.status.resources.stage_list[0]["stage_work"] == expected_stage_work_0 - result.response.status.resources.parameters.availability_zone_list == expected_availability_zone_list - result.response.status.resources.parameters.network_mapping_list == expected_network_mapping_list - fail_msg: 'Unable to create recovery plans' - success_msg: 'Recovery plan created successfully' + fail_msg: "Unable to create recovery plans" + success_msg: "Recovery plan created successfully" ############################################################### Update Recovery Plan ########################################################################################### - set_fact: - expected_availability_zone_list: [ + expected_availability_zone_list: + [ + { "availability_zone_url": "{{dr.primary_az_url}}" }, + { "availability_zone_url": "{{dr.recovery_az_url}}" }, + ] + expected_network_mapping_list_in_check_mode: + [ + { + "are_networks_stretched": false, + "availability_zone_network_mapping_list": + [ + { + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + "test_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, + }, + { + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_network": { "name": "{{dr.recovery_site_network}}" }, + "test_network": { "name": "{{dr.recovery_site_network}}" }, + }, + ], + }, + ] + expected_network_mapping_list: + [ + { + "are_networks_stretched": false, + "availability_zone_network_mapping_list": + [ { - "availability_zone_url": "{{dr.primary_az_url}}" + "availability_zone_url": "{{dr.primary_az_url}}", + "recovery_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 
24, + }, + ], + }, + "test_network": + { + "name": "{{static.name}}", + "subnet_list": + [ + { + "external_connectivity_state": "DISABLED", + "gateway_ip": "{{static.gateway_ip}}", + "prefix_length": 24, + }, + ], + }, }, { - "availability_zone_url": "{{dr.recovery_az_url}}" - } - ] - expected_network_mapping_list_in_check_mode: [ - { - "are_networks_stretched": false, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - } - }, - { - "availability_zone_url": "{{dr.recovery_az_url}}", - "recovery_network": { - "name": "{{dr.recovery_site_network}}" - }, - "test_network": { - "name": "{{dr.recovery_site_network}}" - } - } - ] - } - ] - expected_network_mapping_list: [ - { - "are_networks_stretched": false, - "availability_zone_network_mapping_list": [ - { - "availability_zone_url": "{{dr.primary_az_url}}", - "recovery_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - }, - "test_network": { - "name": "{{static.name}}", - "subnet_list": [ - { - "external_connectivity_state": "DISABLED", - "gateway_ip": "{{static.gateway_ip}}", - "prefix_length": 24 - } - ] - } - }, - { - "availability_zone_url": "{{dr.recovery_az_url}}", - "recovery_network": { - "name": "{{dr.recovery_site_network}}" - }, - "test_network": { - "name": "{{dr.recovery_site_network}}" - } - } - ] - } - ] - exepected_stage_work_0: { - "recover_entities": { - "entity_info_list": [ - { - "any_entity_reference": { - "kind": "vm", - "name": "{{dr_vm.name}}", - "uuid": "{{dr_vm.uuid}}" - }, - "script_list": [ - { - "enable_script_exec": true 
- } - ] - }, - { - "categories": { - "Environment": "Staging" - }, - "script_list": [ - { - "enable_script_exec": true - } - ] - } - ] - } - } - exepected_stage_work_1: { - "recover_entities": { - "entity_info_list": [ - { - "categories": { - "Environment": "Dev" - } - } - ] - } - } + "availability_zone_url": "{{dr.recovery_az_url}}", + "recovery_network": { "name": "{{dr.recovery_site_network}}" }, + "test_network": { "name": "{{dr.recovery_site_network}}" }, + }, + ], + }, + ] + expected_stage_work_0: + { + "recover_entities": + { + "entity_info_list": + [ + { + "any_entity_reference": + { + "kind": "vm", + "name": "{{dr_vm.name}}", + "uuid": "{{dr_vm.uuid}}", + }, + "script_list": [{ "enable_script_exec": true }], + }, + { + "categories": { "Environment": "Staging" }, + "script_list": [{ "enable_script_exec": true }], + }, + ], + }, + } + expected_stage_work_1: + { + "recover_entities": + { "entity_info_list": [{ "categories": { "Environment": "Dev" } }] }, + } - name: Checkmode spec for Update recovery plan. Update networks and stages. 
check_mode: yes @@ -466,13 +451,12 @@ - result.response.spec.description == "test-integration-rp-desc-updated" - result.response.spec.resources.parameters.availability_zone_list == expected_availability_zone_list - result.response.spec.resources.parameters.network_mapping_list == expected_network_mapping_list_in_check_mode - - result.response.spec.resources.stage_list[0]["stage_work"] == exepected_stage_work_0 - - result.response.spec.resources.stage_list[1]["stage_work"] == exepected_stage_work_1 + - result.response.spec.resources.stage_list[0]["stage_work"] == expected_stage_work_0 + - result.response.spec.resources.stage_list[1]["stage_work"] == expected_stage_work_1 - result.response.spec.resources.stage_list[0]["delay_time_secs"] == 2 - fail_msg: 'Unable to create update recovery plan checkmode spec' - success_msg: 'Recovery plan update spec created successfully' - + fail_msg: "Unable to create update recovery plan checkmode spec" + success_msg: "Recovery plan update spec created successfully" - name: Update recovery plan. Add another stage, vm and update networks. 
ntnx_recovery_plans: @@ -526,13 +510,12 @@ - recovery_plan.response.status.description == "test-integration-rp-desc-updated" - recovery_plan.response.status.resources.parameters.availability_zone_list == expected_availability_zone_list - recovery_plan.response.status.resources.parameters.network_mapping_list == expected_network_mapping_list - - recovery_plan.response.status.resources.stage_list[0]["stage_work"] == exepected_stage_work_0 - - recovery_plan.response.status.resources.stage_list[1]["stage_work"] == exepected_stage_work_1 + - recovery_plan.response.status.resources.stage_list[0]["stage_work"] == expected_stage_work_0 + - recovery_plan.response.status.resources.stage_list[1]["stage_work"] == expected_stage_work_1 - recovery_plan.response.status.resources.stage_list[0]["delay_time_secs"] == 2 - fail_msg: 'Unable to updae recovery plans' - success_msg: 'Recovery plan updated successfully' - + fail_msg: "Unable to update recovery plans" + success_msg: "Recovery plan updated successfully" - name: Idempotency Check ntnx_recovery_plans: @@ -587,7 +570,6 @@ ############################################################### Run Recovery Plan Jobs########################################################################################### - - name: Run Test Failover with validation errors for checking negative scenario. 
It will fail in validation phase ntnx_recovery_plan_jobs: nutanix_host: "{{recovery_site_ip}}" @@ -632,7 +614,6 @@ register: test_failover_job - - name: assert job status assert: that: @@ -649,7 +630,6 @@ fail_msg: "Test failover job failed" success_msg: "Test failover job run successfully" - - name: Run Cleanup ntnx_recovery_plan_jobs: job_uuid: "{{test_failover_job.job_uuid}}" @@ -658,7 +638,6 @@ action: CLEANUP register: result - - name: assert job status assert: that: diff --git a/tests/integration/targets/ntnx_roles/tasks/create.yml b/tests/integration/targets/ntnx_roles/tasks/create.yml index 541965519..60fab7013 100644 --- a/tests/integration/targets/ntnx_roles/tasks/create.yml +++ b/tests/integration/targets/ntnx_roles/tasks/create.yml @@ -44,7 +44,7 @@ - ("{{ p1 }}" == "{{ test_permission_1_uuid }}" and "{{ p2 }}" == "{{ test_permission_2_uuid }}") or ("{{ p2 }}" == "{{ test_permission_1_uuid }}" and "{{ p1 }}" == "{{ test_permission_2_uuid }}") fail_msg: "Unable to create roles with certain permissions" - success_msg: "Roles with given permissions created susccessfully" + success_msg: "Roles with given permissions created successfully" - set_fact: todelete: '{{ result["response"]["metadata"]["uuid"] }}' @@ -99,7 +99,6 @@ ################################################################################################### - - name: cleanup created entities ntnx_roles: state: absent @@ -107,6 +106,5 @@ register: result ignore_errors: True - - set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_roles/tasks/delete.yml b/tests/integration/targets/ntnx_roles/tasks/delete.yml index 3d4f00410..c13f6e01e 100644 --- a/tests/integration/targets/ntnx_roles/tasks/delete.yml +++ b/tests/integration/targets/ntnx_roles/tasks/delete.yml @@ -28,7 +28,7 @@ - test_role.response is defined - test_role.changed == True fail_msg: "Unable to create roles with certain permissions" - success_msg: "Roles with given permissions created susccessfully" + success_msg: 
"Roles with given permissions created successfully" ################################################################################################### diff --git a/tests/integration/targets/ntnx_roles/tasks/update.yml b/tests/integration/targets/ntnx_roles/tasks/update.yml index 16644d37e..81b9c20ca 100644 --- a/tests/integration/targets/ntnx_roles/tasks/update.yml +++ b/tests/integration/targets/ntnx_roles/tasks/update.yml @@ -34,8 +34,7 @@ - test_role.response is defined - test_role.changed == True fail_msg: "Unable to create roles with certain permissions" - success_msg: "Roles with given permissions created susccessfully" - + success_msg: "Roles with given permissions created successfully" ################################################################################################### @@ -63,7 +62,7 @@ - result.response.status.resources.permission_reference_list | length == 1 fail_msg: "Unable to update role" - success_msg: "Roles with given permissions updated susccessfully" + success_msg: "Roles with given permissions updated successfully" ################################################################################################### diff --git a/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml b/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml index 0e9b038e3..b2ebfe70f 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml @@ -87,7 +87,7 @@ fail_msg: ' fail: unable to create app security rule with inbound and outbound list' success_msg: 'pass: create app security rule with inbound and outbound list successfully' -- name: update app security rule by adding to outbound list and remove tule from inbound list +- name: update app security rule by adding to outbound list and remove rule from inbound list ntnx_security_rules: security_rule_uuid: '{{ result.response.metadata.uuid }}' app_rule: diff --git 
a/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml b/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml index 5a7243409..682b58280 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/isolation_rule.yml @@ -5,11 +5,11 @@ name: test_isolation_rule isolation_rule: isolate_category: - Environment: - - Dev + Environment: + - Dev from_category: - Environment: - - Production + Environment: + - Production subset_category: Environment: - Staging @@ -26,7 +26,7 @@ - result.changed == false - result.response.spec.name=="test_isolation_rule" - result.security_rule_uuid is none - fail_msg: ' fail: unable to create isolation security rule with first_entity_filter and second_entity_filter with check mode ' + fail_msg: " fail: unable to create isolation security rule with first_entity_filter and second_entity_filter with check mode " success_msg: >- pass: create isolation security rule with first_entity_filter and second_entity_filter successfully with check mode @@ -37,11 +37,11 @@ name: test_isolation_rule isolation_rule: isolate_category: - Environment: - - Dev + Environment: + - Dev from_category: - Environment: - - Production + Environment: + - Production subset_category: Environment: - Staging @@ -57,14 +57,14 @@ - result.failed == false - result.response.spec.name=="test_isolation_rule" - result.response.status.state == 'COMPLETE' - fail_msg: ' fail: unable to create isolation security rule with first_entity_filter and second_entity_filter' + fail_msg: " fail: unable to create isolation security rule with first_entity_filter and second_entity_filter" success_msg: >- pass: create isolation security rule with first_entity_filter and second_entity_filter successfully -- name: update isoloation security rule action with check_mode +- name: update isolation security rule action with check_mode ntnx_security_rules: - security_rule_uuid: '{{ 
result.response.metadata.uuid }}' + security_rule_uuid: "{{ result.response.metadata.uuid }}" isolation_rule: policy_mode: APPLY register: output @@ -79,13 +79,13 @@ - output.changed == false - output.response.spec.name=="test_isolation_rule" - output.security_rule_uuid is none - fail_msg: ' fail: unable to update isoloation security rule action with check_mode' + fail_msg: " fail: unable to update isolation security rule action with check_mode" success_msg: >- - pass: update isoloation security rule action with check_mode successfully + pass: update isolation security rule action with check_mode successfully -- name: update isoloation security rule action +- name: update isolation security rule action ntnx_security_rules: - security_rule_uuid: '{{ result.security_rule_uuid}}' + security_rule_uuid: "{{ result.security_rule_uuid}}" isolation_rule: policy_mode: APPLY register: result @@ -99,11 +99,11 @@ - result.changed == true - result.response.status.state == 'COMPLETE' - result.response.spec.resources.isolation_rule.action == "APPLY" - fail_msg: ' fail: unable to update isolation rule action ' - success_msg: 'pass : update isolation rule action successfully' -- name: update isoloation security with same values + fail_msg: " fail: unable to update isolation rule action " + success_msg: "pass : update isolation rule action successfully" +- name: update isolation security with same values ntnx_security_rules: - security_rule_uuid: '{{result.security_rule_uuid}}' + security_rule_uuid: "{{result.security_rule_uuid}}" isolation_rule: policy_mode: APPLY register: output @@ -114,12 +114,12 @@ - output.failed == false - output.changed == false - output.msg == "Nothing to change" - fail_msg: ' fail: unable to update isolation rule action ' - success_msg: 'pass : update isolation rule action successfully' + fail_msg: " fail: unable to update isolation rule action " + success_msg: "pass : update isolation rule action successfully" - name: delete isolation rule 
ntnx_security_rules: state: absent - security_rule_uuid: '{{ result.security_rule_uuid }}' + security_rule_uuid: "{{ result.security_rule_uuid }}" register: result ignore_errors: true @@ -129,5 +129,5 @@ - result.response is defined - result.failed == false - result.response.status == 'SUCCEEDED' - fail_msg: ' fail: unable to delete isolation security rule ' - success_msg: 'pass : delete isolation security rule successfully' + fail_msg: " fail: unable to delete isolation security rule " + success_msg: "pass : delete isolation security rule successfully" diff --git a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml index fe02bd1bd..a3edcc138 100644 --- a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml +++ b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml @@ -22,8 +22,8 @@ - first_rule.failed == false - first_rule.response.status.state == 'COMPLETE' - first_rule.response.spec.name=="isolation_test_rule" - fail_msg: ' fail: Unable to create isolation_rule for testing ' - success_msg: 'pass: isolation_rule for testing created successfully ' + fail_msg: " fail: Unable to create isolation_rule for testing " + success_msg: "pass: isolation_rule for testing created successfully " ################################### - name: getting all security rules ntnx_security_rules_info: @@ -38,12 +38,12 @@ - result.failed == false - result.response.metadata.kind == "network_security_rule" - result.response.metadata.total_matches > 0 - fail_msg: ' fail: unable to get security rules ' - success_msg: 'pass: get all security rules successfully ' + fail_msg: " fail: unable to get security rules " + success_msg: "pass: get all security rules successfully " ################################### -- name: getting particlar security rule using security_rule_uuid +- name: getting particular security rule using 
security_rule_uuid ntnx_security_rules_info: - security_rule_uuid: '{{ first_rule.response.metadata.uuid }}' + security_rule_uuid: "{{ first_rule.response.metadata.uuid }}" register: result ignore_errors: true @@ -55,8 +55,8 @@ - result.failed == false - result.response.status.state == 'COMPLETE' - first_rule.response.metadata.uuid == result.response.metadata.uuid - fail_msg: ' fail : unable to get particlar security rule using security_rule_uuid' - success_msg: 'pass: getting security rule using security_rule_uuid succesfuly' + fail_msg: " fail : unable to get particular security rule using security_rule_uuid" + success_msg: "pass: getting security rule using security_rule_uuid successfully" ################################### - name: getting all security rules sorted ntnx_security_rules_info: @@ -74,13 +74,13 @@ - result.response.metadata.kind == "network_security_rule" - result.response.metadata.sort_order == "ASCENDING" - result.response.metadata.sort_attribute == "Name" - fail_msg: ' fail: unable to get all security rules sorted' - success_msg: 'pass: getting all security rules sorted successfully ' + fail_msg: " fail: unable to get all security rules sorted" + success_msg: "pass: getting all security rules sorted successfully " ################################### - name: delete security rule ntnx_security_rules: state: absent - security_rule_uuid: '{{ first_rule.response.metadata.uuid }}' + security_rule_uuid: "{{ first_rule.response.metadata.uuid }}" register: result ignore_errors: true @@ -90,6 +90,6 @@ - result.response is defined - result.failed == false - result.response.status == 'SUCCEEDED' - fail_msg: ' fail: unable to delete secutiry rule ' - success_msg: 'pass: security rule deleted successfully ' + fail_msg: " fail: unable to delete security rule " + success_msg: "pass: security rule deleted successfully " ################################### diff --git a/tests/integration/targets/ntnx_service_groups/tasks/create.yml 
b/tests/integration/targets/ntnx_service_groups/tasks/create.yml index 47b8759cc..1eb48c81d 100644 --- a/tests/integration/targets/ntnx_service_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_service_groups/tasks/create.yml @@ -4,7 +4,7 @@ - name: create tcp service group ntnx_service_groups: - name: tcp_srvive_group + name: tcp_service_group desc: desc service_details: tcp: @@ -15,9 +15,9 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true @@ -43,7 +43,7 @@ ################################################################ - name: create udp service group ntnx_service_groups: - name: udp_srvive_group + name: udp_service_group desc: desc service_details: udp: @@ -54,9 +54,9 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true @@ -82,7 +82,7 @@ ################################################################ - name: create icmp with service group ntnx_service_groups: - name: icmp_srvive_group + name: icmp_service_group desc: desc service_details: icmp: @@ -93,9 +93,9 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true @@ -117,7 +117,7 @@ ################################################################ - name: create service group with tcp and udp and icmp 
ntnx_service_groups: - name: app_srvive_group + name: app_service_group desc: desc service_details: tcp: @@ -130,9 +130,9 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true diff --git a/tests/integration/targets/ntnx_service_groups/tasks/update.yml b/tests/integration/targets/ntnx_service_groups/tasks/update.yml index 2845caa71..2b2039cab 100644 --- a/tests/integration/targets/ntnx_service_groups/tasks/update.yml +++ b/tests/integration/targets/ntnx_service_groups/tasks/update.yml @@ -1,8 +1,7 @@ --- - - name: create tcp service group ntnx_service_groups: - name: tcp_srvive_group + name: tcp_service_group desc: desc service_details: tcp: @@ -42,9 +41,9 @@ register: result ignore_errors: true -- name: getting particular service_group using uuid +- name: getting particular service_group using uuid ntnx_service_groups_info: - service_group_uuid: '{{ result.service_group_uuid }}' + service_group_uuid: "{{ result.service_group_uuid }}" register: result ignore_errors: true diff --git a/tests/integration/targets/ntnx_static_routes/tasks/create.yml b/tests/integration/targets/ntnx_static_routes/tasks/create.yml index 16c81ed50..581f280b8 100644 --- a/tests/integration/targets/ntnx_static_routes/tasks/create.yml +++ b/tests/integration/targets/ntnx_static_routes/tasks/create.yml @@ -39,8 +39,8 @@ - result.response.status.resources.default_route["destination"] == "0.0.0.0/0" - result.response.status.resources.default_route["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" - fail_msg: 'Fail: Unable to update static routes of vpc' - success_msg: 'Succes: static routes updated successfully' + fail_msg: "Fail: Unable to update static routes of vpc" + success_msg: "Success: static 
routes updated successfully" ########################################################################################################### @@ -93,11 +93,11 @@ - result.response.status.resources.static_routes_list[0]["destination"] == "10.2.4.0/24" - result.response.status.resources.static_routes_list[0]["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" fail_msg: "Static routes overriding failed" - success_msg: "Static routes overriden successfully" + success_msg: "Static routes overridden successfully" ########################################################################################################### -- name: Netgative scenario of cretaing multiple default routes +- name: Negative scenario of creating multiple default routes ntnx_static_routes: vpc_uuid: "{{ vpc.uuid }}" static_routes: diff --git a/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml b/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml index 4b79f1a08..98e3e79ba 100644 --- a/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml @@ -25,8 +25,8 @@ - result.response is defined - result.response.status.state == 'COMPLETE' - result.changed == true - fail_msg: 'Fail: Unable to update static routes of vpc' - success_msg: 'Succes: static routes updated successfully' + fail_msg: "Fail: Unable to update static routes of vpc" + success_msg: "Success: static routes updated successfully" ########################################################################################################### @@ -35,7 +35,6 @@ vpc_uuid: "{{ vpc.uuid }}" register: result - - set_fact: d1: "{{ result.response.status.resources.static_routes_list[0].destination }}" d2: "{{ result.response.status.resources.static_routes_list[1].destination }}" @@ -54,8 +53,8 @@ - result.response.status.resources.default_route["destination"] == "0.0.0.0/0" - 
result.response.status.resources.default_route["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" - fail_msg: 'Fail: Unable to get static routes for vpc' - success_msg: 'Succes' + fail_msg: "Fail: Unable to get static routes for vpc" + success_msg: "Success" ########################################################################################################### diff --git a/tests/integration/targets/ntnx_user_groups/tasks/create.yml b/tests/integration/targets/ntnx_user_groups/tasks/create.yml index 31fb156e3..7a11b4a28 100644 --- a/tests/integration/targets/ntnx_user_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_user_groups/tasks/create.yml @@ -143,7 +143,7 @@ - result.changed == true - result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" fail_msg: "Unable to delete user group " - success_msg: "user group deletd successfully" + success_msg: "user group deleted successfully" # - name: create user group with idp diff --git a/tests/integration/targets/ntnx_users/tasks/create.yml b/tests/integration/targets/ntnx_users/tasks/create.yml index b6bdf0c4b..6c705ad19 100644 --- a/tests/integration/targets/ntnx_users/tasks/create.yml +++ b/tests/integration/targets/ntnx_users/tasks/create.yml @@ -20,7 +20,7 @@ - result.failed == false - result.user_uuid == None - result.response.spec.resources.directory_service_user.directory_service_reference.uuid == "{{directory_service_uuid}}" - fail_msg: "fail: user created whil check mode on" + fail_msg: "fail: user created while check mode on" success_msg: "pass: returned as expected" diff --git a/tests/integration/targets/ntnx_vms_clone/tasks/create.yml b/tests/integration/targets/ntnx_vms_clone/tasks/create.yml index cf168c02f..1bc50375d 100644 --- a/tests/integration/targets/ntnx_vms_clone/tasks/create.yml +++ b/tests/integration/targets/ntnx_vms_clone/tasks/create.yml @@ -5,25 +5,25 @@ copy: dest: "init_cloud.yml" content: | - #cloud-config - 
chpasswd: - list: | - root: "{{ password }}" - expire: False - fqdn: myNutanixVM + #cloud-config + chpasswd: + list: | + root: "{{ password }}" + expire: False + fqdn: myNutanixVM -- name: VM with minimum requiremnts to clone +- name: VM with minimum requirements to clone ntnx_vms: - state: present - name: integration_test_clone_vm - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 + state: present + name: integration_test_clone_vm + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 register: vm ignore_errors: true @@ -32,19 +32,19 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VM with minimum requiremnts to clone ' - success_msg: 'Succes: VM with minimum requiremnts created successfully ' + fail_msg: "Fail: Unable to create VM with minimum requirements to clone " + success_msg: "Success: VM with minimum requirements created successfully " ############################## - name: clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - vcpus: 2 - cores_per_vcpu: 2 - memory_gb: 2 - name: cloned vm - timezone: GMT - force_power_off: true + src_vm_uuid: "{{ vm.vm_uuid }}" + vcpus: 2 + cores_per_vcpu: 2 + memory_gb: 2 + name: cloned vm + timezone: GMT + force_power_off: true register: result ignore_errors: true @@ -53,19 +53,19 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off' - success_msg: 'Succes: VM cloned successfully and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off ' + fail_msg: "Fail: Unable to clone vm and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with 
force_power_off" + success_msg: "Success: VM cloned successfully and change vcpus,memory_gb,cores_per_vcpu,timezone,desc,name with force_power_off " - set_fact: - todelete: '{{ todelete + [ result.vm_uuid ] }}' + todelete: "{{ todelete + [ result.vm_uuid ] }}" ############################## - name: clone vm and add network ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - networks: - - is_connected: true - subnet: - uuid: "{{ static.uuid }}" + src_vm_uuid: "{{ vm.vm_uuid }}" + networks: + - is_connected: true + subnet: + uuid: "{{ static.uuid }}" register: result ignore_errors: true @@ -74,19 +74,19 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to clone vm while it is off ' - success_msg: 'Succes: VM cloned successfully ' + fail_msg: "Fail: Unable to clone vm while it is off " + success_msg: "Success: VM cloned successfully " - set_fact: - todelete: '{{ todelete + [ result.vm_uuid ] }}' + todelete: "{{ todelete + [ result.vm_uuid ] }}" ########################################### - name: clone vm with check mode ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - networks: - - is_connected: false - subnet: - name: "{{ network.dhcp.name }}" + src_vm_uuid: "{{ vm.vm_uuid }}" + networks: + - is_connected: false + subnet: + name: "{{ network.dhcp.name }}" register: result ignore_errors: true check_mode: yes @@ -98,16 +98,16 @@ - result.changed == false - result.failed == false - result.task_uuid != "" - success_msg: ' Success: returned response as expected ' - fail_msg: ' Fail: clone vm with check_mode ' + success_msg: " Success: returned response as expected " + fail_msg: " Fail: clone vm with check_mode " ########################################### - name: clone vm with script ntnx_vms_clone: - src_vm_uuid: "{{ vm.vm_uuid }}" - guest_customization: - type: "cloud_init" - script_path: "./init_cloud.yml" - is_overridable: True + src_vm_uuid: "{{ vm.vm_uuid }}" + guest_customization: + type: "cloud_init" + 
script_path: "./init_cloud.yml" + is_overridable: True register: result ignore_errors: true @@ -116,19 +116,19 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to clone vm vm with script' - success_msg: 'Succes: VM cloned with script successfully ' + fail_msg: "Fail: Unable to clone vm vm with script" + success_msg: "Success: VM cloned with script successfully " - set_fact: - todelete: '{{ todelete + [ result.vm_uuid ] }}' + todelete: "{{ todelete + [ result.vm_uuid ] }}" ########################################### - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - loop: '{{ todelete }}' + state: absent + vm_uuid: "{{ item }}" + loop: "{{ todelete }}" - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ vm.vm_uuid }}' + state: absent + vm_uuid: "{{ vm.vm_uuid }}" diff --git a/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml b/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml index 43570995d..dfe3c20c0 100644 --- a/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml +++ b/tests/integration/targets/nutanix_floating_ips_info/tasks/list_floating_ips.yml @@ -11,7 +11,7 @@ that: - result.response is defined fail_msg: " Unable to list floating_ips " - success_msg: " Floatong_ips listed successfully " + success_msg: " Floating_ips listed successfully " ############################################################## - name: List floating_ips using length and offset ntnx_floating_ips_info: @@ -26,7 +26,7 @@ that: - result.response is defined fail_msg: " Unable to list floating_ips " - success_msg: " Floatong_ips listed successfully " + success_msg: " Floating_ips listed successfully " ############################################################# - name: List floating_ips using ascending ip sorting ntnx_floating_ips_info: @@ -40,5 +40,5 @@ that: - result.response is defined 
fail_msg: " Unable to list floating_ips " - success_msg: " Floatong_ips listed successfully " + success_msg: " Floating_ips listed successfully " ############################################################# diff --git a/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml b/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml index ecb702975..8720e8fb5 100644 --- a/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml +++ b/tests/integration/targets/nutanix_subnets/tasks/negative_scenarios.yml @@ -1,84 +1,84 @@ - - debug: - msg: "Started Negative Creation Cases" +- debug: + msg: "Started Negative Creation Cases" - - name: Unknow virtual switch name - ntnx_subnets: - state: present - name: VLAN subnet without IPAM - vlan_subnet: - vlan_id: "{{ vlan_subnets_ids.0 }}" - virtual_switch: - name: "virtual_switch" - cluster: - uuid: "{{ cluster.uuid }}" - register: result - ignore_errors: True +- name: Unknown virtual switch name + ntnx_subnets: + state: present + name: VLAN subnet without IPAM + vlan_subnet: + vlan_id: "{{ vlan_subnets_ids.0 }}" + virtual_switch: + name: "virtual_switch" + cluster: + uuid: "{{ cluster.uuid }}" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.failed==True - - result.msg=="Failed generating subnet spec" - success_msg: ' Success: returned error as expected ' +- name: Creation Status + assert: + that: + - result.failed==True + - result.msg=="Failed generating subnet spec" + success_msg: " Success: returned error as expected " ############################################################### - - name: Unknow virtual switch uuid - ntnx_subnets: - state: present - name: VLAN subnet with IPAM - vlan_subnet: - vlan_id: "{{ vlan_subnets_ids.1 }}" - virtual_switch: - uuid: 91639374-c0b9-48c3-bfc1-f9c89343b3e - cluster: - name: "{{ cluster.name }}" - ipam: - network_ip: "{{ ip_address_management.network_ip }}" - network_prefix: "{{ 
ip_address_management.network_prefix }}" - gateway_ip: "{{ ip_address_management.gateway_ip_address }}" - register: result - ignore_errors: true +- name: Unknown virtual switch uuid + ntnx_subnets: + state: present + name: VLAN subnet with IPAM + vlan_subnet: + vlan_id: "{{ vlan_subnets_ids.1 }}" + virtual_switch: + uuid: 91639374-c0b9-48c3-bfc1-f9c89343b3e + cluster: + name: "{{ cluster.name }}" + ipam: + network_ip: "{{ ip_address_management.network_ip }}" + network_prefix: "{{ ip_address_management.network_prefix }}" + gateway_ip: "{{ ip_address_management.gateway_ip_address }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - success_msg: ' Success: returned error as expected ' +- name: Creation Status + assert: + that: + - result.failed==True + success_msg: " Success: returned error as expected " ############################################################### - - name: Unknown Cluster - ntnx_subnets: - state: present - name: VLAN subnet with IPAM and IP pools - vlan_subnet: - vlan_id: "{{vlan_subnets_ids.2}}" - virtual_switch: - name: "{{ virtual_switch.name }}" - cluster: - name: auto_cluster_prod_1a642ea0a5c - ipam: - network_ip: "{{ ip_address_management.network_ip }}" - network_prefix: "{{ ip_address_management.network_prefix }}" - gateway_ip: "{{ ip_address_management.gateway_ip_address }}" - ip_pools: - - start_ip: "{{ ip_address_pools.start_address }}" - end_ip: "{{ ip_address_pools.end_address }}" - register: result - ignore_errors: true +- name: Unknown Cluster + ntnx_subnets: + state: present + name: VLAN subnet with IPAM and IP pools + vlan_subnet: + vlan_id: "{{vlan_subnets_ids.2}}" + virtual_switch: + name: "{{ virtual_switch.name }}" + cluster: + name: auto_cluster_prod_1a642ea0a5c + ipam: + network_ip: "{{ ip_address_management.network_ip }}" + network_prefix: "{{ ip_address_management.network_prefix }}" + gateway_ip: "{{ ip_address_management.gateway_ip_address }}" + ip_pools: + - 
start_ip: "{{ ip_address_pools.start_address }}" + end_ip: "{{ ip_address_pools.end_address }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - success_msg: ' Success: returned error as expected ' +- name: Creation Status + assert: + that: + - result.failed==True + success_msg: " Success: returned error as expected " ############################################################### - - name: Delete subnet with unknown uuid - ntnx_subnets: - state: absent - subnet_uuid: 5 - register: resultt - ignore_errors: true +- name: Delete subnet with unknown uuid + ntnx_subnets: + state: absent + subnet_uuid: 5 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.failed==True - success_msg: ' Success: returned error as expected ' +- name: Creation Status + assert: + that: + - result.failed==True + success_msg: " Success: returned error as expected " diff --git a/tests/integration/targets/nutanix_vms/tasks/create.yml b/tests/integration/targets/nutanix_vms/tasks/create.yml index 9da6dfc96..b928c3dd6 100644 --- a/tests/integration/targets/nutanix_vms/tasks/create.yml +++ b/tests/integration/targets/nutanix_vms/tasks/create.yml @@ -1,601 +1,601 @@ - - name: Create Cloud-init Script file - copy: - dest: "cloud_init.yml" - content: | - #cloud-config - chpasswd: - list: | - root: "{{ password }}" - expire: False - fqdn: myNutanixVM +- name: Create Cloud-init Script file + copy: + dest: "cloud_init.yml" + content: | + #cloud-config + chpasswd: + list: | + root: "{{ password }}" + expire: False + fqdn: myNutanixVM ########################################################################## - - name: VM with none values - ntnx_vms: - state: present - name: none - timezone: GMT - project: - uuid: "{{ project.uuid }}" - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - disks: - - type: DISK - size_gb: 5 - bus: SCSI - vcpus: - cores_per_vcpu: - memory_gb: 
- register: result - ignore_errors: true +- name: VM with none values + ntnx_vms: + state: present + name: none + timezone: GMT + project: + uuid: "{{ project.uuid }}" + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: DISK + size_gb: 5 + bus: SCSI + vcpus: + cores_per_vcpu: + memory_gb: + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to Create VM with none values ' - success_msg: 'VM with none values created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to Create VM with none values " + success_msg: "VM with none values created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' # ################################################################################## - - name: VM with owner name - ntnx_vms: - state: present - name: none - timezone: GMT - project: - uuid: "{{ project.uuid }}" - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - owner: - name: "{{ vm_owner.name }}" - disks: - - type: DISK - size_gb: 5 - bus: SCSI - register: result - ignore_errors: true +- name: VM with owner name + ntnx_vms: + state: present + name: none + timezone: GMT + project: + uuid: "{{ project.uuid }}" + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + owner: + name: "{{ vm_owner.name }}" + disks: + - type: DISK + size_gb: 5 + bus: SCSI + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - - result.response.metadata.owner_reference.name == "{{ vm_owner.name }}" - - 
result.response.metadata.owner_reference.uuid == "{{ vm_owner.uuid }}" - - result.response.metadata.owner_reference.kind == "user" - fail_msg: 'Unable to Create VM with owner' - success_msg: 'VM with owner created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + - result.response.metadata.owner_reference.name == "{{ vm_owner.name }}" + - result.response.metadata.owner_reference.uuid == "{{ vm_owner.uuid }}" + - result.response.metadata.owner_reference.kind == "user" + fail_msg: "Unable to Create VM with owner" + success_msg: "VM with owner created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' ################################################################################## - - name: VM with ubuntu image and different specifications - ntnx_vms: - state: present - project: - name: "{{ project.name }}" - name: "VM with Ubuntu image" - desc: "VM with cluster, network, category, disk with Ubuntu image, guest customization " - categories: - AppType: - - Default - Environment: - - Dev - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "{{ network.dhcp.name }}" - disks: - - type: "DISK" - size_gb: 30 - bus: "SATA" - clone_image: - name: "{{ ubuntu }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: result +- name: VM with ubuntu image and different specifications + ntnx_vms: + state: present + project: + name: "{{ project.name }}" + name: "VM with Ubuntu image" + desc: "VM with cluster, network, category, disk with Ubuntu image, guest customization " + categories: + AppType: + - Default + Environment: + - Dev + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: True + subnet: 
+ name: "{{ network.dhcp.name }}" + disks: + - type: "DISK" + size_gb: 30 + bus: "SATA" + clone_image: + name: "{{ ubuntu }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: True + register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - - result.response.metadata.categories_mapping["AppType"] == ["Default"] - - result.response.metadata.categories_mapping["Environment"] == ["Dev"] - fail_msg: 'Unable to Create VM with Ubuntu image and different specifications ' - success_msg: 'VM with Ubuntu image and different specifications created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + - result.response.metadata.categories_mapping["AppType"] == ["Default"] + - result.response.metadata.categories_mapping["Environment"] == ["Dev"] + fail_msg: "Unable to Create VM with Ubuntu image and different specifications " + success_msg: "VM with Ubuntu image and different specifications created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ######################################################################################### - - name: VM with CentOS-7-cloud-init image with disk image size - ntnx_vms: - state: present - name: VM with CentOS-7-cloud-init image - memory_gb: 1 - timezone: "UTC" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: result - ignore_errors: True - - name: 
Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with CentOS-7-cloud-init image' - success_msg: 'VM with CentOS-7-cloud-init image created successfully ' +- name: VM with CentOS-7-cloud-init image with disk image size + ntnx_vms: + state: present + name: VM with CentOS-7-cloud-init image + memory_gb: 1 + timezone: "UTC" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos }}" + bus: "SCSI" + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: True + register: result + ignore_errors: True +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with CentOS-7-cloud-init image" + success_msg: "VM with CentOS-7-cloud-init image created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ################################################################################# - - name: VM with CentOS-7-cloud-init image without disk image size - ntnx_vms: - state: present - memory_gb: 1 - name: VM with CentOS-7-cloud-init image without image size - timezone: "UTC" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - clone_image: - name: "{{ centos }}" - bus: "SCSI" - guest_customization: - type: "cloud_init" - script_path: "./cloud_init.yml" - is_overridable: True - register: result - ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with CentOS-7-cloud-init image' - success_msg: 'VM with CentOS-7-cloud-init image 
created successfully ' +- name: VM with CentOS-7-cloud-init image without disk image size + ntnx_vms: + state: present + memory_gb: 1 + name: VM with CentOS-7-cloud-init image without image size + timezone: "UTC" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + clone_image: + name: "{{ centos }}" + bus: "SCSI" + guest_customization: + type: "cloud_init" + script_path: "./cloud_init.yml" + is_overridable: True + register: result + ignore_errors: True +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with CentOS-7-cloud-init image" + success_msg: "VM with CentOS-7-cloud-init image created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- set_fact: + todelete: [] ################################################################################# - - name: VM with Cluster, Network, Universal time zone, one Disk - ntnx_vms: - state: present - name: "VM with Cluster Network and Disk" - memory_gb: 1 - timezone: "Universal" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: False - subnet: - uuid: "{{ network.dhcp.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - bus: "PCI" - register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with Cluster , Network, Universal time zone, one Disk' - success_msg: 
'VM with Cluster , Network, Universal time zone, one Disk created successfully ' +- name: VM with Cluster, Network, Universal time zone, one Disk + ntnx_vms: + state: present + name: "VM with Cluster Network and Disk" + memory_gb: 1 + timezone: "Universal" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: False + subnet: + uuid: "{{ network.dhcp.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + bus: "PCI" + register: result +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with Cluster , Network, Universal time zone, one Disk" + success_msg: "VM with Cluster , Network, Universal time zone, one Disk created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ######################################################################################## - - name: VM with Cluster, different Disks, Memory size - ntnx_vms: - state: present - name: "VM with different disks" - timezone: "UTC" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - bus: "SATA" - - type: "DISK" - size_gb: 30 - bus: "SCSI" - memory_gb: 2 - register: result - ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to create VM with Cluster, different Disks, Memory size' - success_msg: 'VM with Cluster, different Disks, Memory size created successfully ' +- name: VM with Cluster, different Disks, Memory size + ntnx_vms: + state: present + name: "VM with different disks" + timezone: "UTC" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + bus: "SATA" + - type: "DISK" + size_gb: 30 + 
bus: "SCSI" + memory_gb: 2 + register: result + ignore_errors: True +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to create VM with Cluster, different Disks, Memory size" + success_msg: "VM with Cluster, different Disks, Memory size created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ##################################################################################### - - name: VM with Cluster, different CDROMs - ntnx_vms: - state: present - memory_gb: 1 - wait: true - name: "VM with multiple CDROMs" - cluster: - name: "{{ cluster.name }}" - disks: - - type: "CDROM" - bus: "SATA" - empty_cdrom: True - - type: "CDROM" - bus: "IDE" - empty_cdrom: True - cores_per_vcpu: 1 - register: result - ignore_errors: True +- name: VM with Cluster, different CDROMs + ntnx_vms: + state: present + memory_gb: 1 + wait: true + name: "VM with multiple CDROMs" + cluster: + name: "{{ cluster.name }}" + disks: + - type: "CDROM" + bus: "SATA" + empty_cdrom: True + - type: "CDROM" + bus: "IDE" + empty_cdrom: True + cores_per_vcpu: 1 + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: 'Unable to Create VM with Cluster, different CDROMs ' - success_msg: 'VM with Cluster, different CDROMs created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: "Unable to Create VM with Cluster, different CDROMs " + success_msg: "VM with Cluster, different CDROMs created successfully " - - set_fact: - todelete: '{{ todelete + [ 
result["response"]["metadata"]["uuid"] ] }}' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- set_fact: + todelete: [] #################################################################################### - - name: VM with all specification - ntnx_vms: - state: present - wait: True - name: "All specification" - timezone: "GMT" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 2 - bus: "SCSI" - - type: "DISK" - size_gb: 10 - bus: "PCI" - - type: "DISK" - size_gb: 2 - bus: "SATA" - - type: "DISK" - size_gb: 10 - bus: "SCSI" - - type: "CDROM" - bus: "IDE" - empty_cdrom: True - boot_config: - boot_type: "UEFI" - boot_order: - - "DISK" - - "CDROM" - - "NETWORK" - vcpus: 1 - cores_per_vcpu: 2 - memory_gb: 1 - register: result - ignore_errors: True +- name: VM with all specification + ntnx_vms: + state: present + wait: True + name: "All specification" + timezone: "GMT" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 2 + bus: "SCSI" + - type: "DISK" + size_gb: 10 + bus: "PCI" + - type: "DISK" + size_gb: 2 + bus: "SATA" + - type: "DISK" + size_gb: 10 + bus: "SCSI" + - type: "CDROM" + bus: "IDE" + empty_cdrom: True + boot_config: + boot_type: "UEFI" + boot_order: + - "DISK" + - "CDROM" + - "NETWORK" + vcpus: 1 + cores_per_vcpu: 2 + memory_gb: 1 + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with all specification ' - success_msg: ' VM with all specification created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined 
+ - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with all specification " + success_msg: " VM with all specification created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ################################################################################################## - - name: VM with managed subnet - ntnx_vms: - state: present - name: VM with managed subnet - memory_gb: 1 - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: true - subnet: - uuid: "{{ network.dhcp.uuid }}" - register: result - ignore_errors: true +- name: VM with managed subnet + ntnx_vms: + state: present + name: VM with managed subnet + memory_gb: 1 + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + uuid: "{{ network.dhcp.uuid }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with managed subnet ' - success_msg: ' VM with with managed subnet created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with managed subnet " + success_msg: " VM with managed subnet created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ################################################################################################### - - name: VM with minimum requiremnts - 
ntnx_vms: - state: present - name: MinReqVM - cluster: - name: "{{ cluster.name }}" - register: result - ignore_errors: true +- name: VM with minimum requirements + ntnx_vms: + state: present + name: MinReqVM + cluster: + name: "{{ cluster.name }}" + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- set_fact: + todelete: [] ################################################################################################## - - name: VM with unmanaged vlan - ntnx_vms: - desc: "VM with unmanaged vlan" - state: present - name: VM with unmanaged vlan - timezone: UTC - cluster: - uuid: "{{ cluster.uuid }}" - networks: - - is_connected: false - subnet: - uuid: "{{ static.uuid }}" - private_ip: "{{ network.static.ip }}" - boot_config: - boot_type: LEGACY - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result - ignore_errors: true +- name: VM with unmanaged vlan + ntnx_vms: + desc: 
"VM with unmanaged vlan" + state: present + name: VM with unmanaged vlan + timezone: UTC + cluster: + uuid: "{{ cluster.uuid }}" + networks: + - is_connected: false + subnet: + uuid: "{{ static.uuid }}" + private_ip: "{{ network.static.ip }}" + boot_config: + boot_type: LEGACY + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with unmanaged vlan ' - success_msg: ' VM with unmanaged vlan created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with unmanaged vlan " + success_msg: " VM with unmanaged vlan created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' - - name: Delete all Created VM - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' - - set_fact: - todelete: [] +- name: Delete all Created VM + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" +- set_fact: + todelete: [] ###################################################################################### - - name: VM with managed and unmanaged network - ntnx_vms: - state: present - name: VM_NIC - timezone: UTC - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: true - subnet: - name: "{{ network.dhcp.name }}" - cluster: - name: "{{ cluster.name }}" - - is_connected: true - subnet: - uuid: "{{ static.uuid }}" - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: DISK - size_gb: 1 - bus: SCSI - - type: DISK 
- size_gb: 3 - bus: PCI - - type: CDROM - bus: SATA - empty_cdrom: True - - type: CDROM - bus: IDE - empty_cdrom: True - boot_config: - boot_type: UEFI - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result - ignore_errors: true +- name: VM with managed and unmanaged network + ntnx_vms: + state: present + name: VM_NIC + timezone: UTC + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: true + subnet: + name: "{{ network.dhcp.name }}" + cluster: + name: "{{ cluster.name }}" + - is_connected: true + subnet: + uuid: "{{ static.uuid }}" + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: DISK + size_gb: 1 + bus: SCSI + - type: DISK + size_gb: 3 + bus: PCI + - type: CDROM + bus: SATA + empty_cdrom: True + - type: CDROM + bus: IDE + empty_cdrom: True + boot_config: + boot_type: UEFI + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result + ignore_errors: true - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with managed and unmanaged network ' - success_msg: ' VM with managed and unmanaged network created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with managed and unmanaged network " + success_msg: " VM with managed and unmanaged network created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' ######################################################################################### - - name: VM with diffrent disk types and diffrent sizes with UEFI boot type - ntnx_vms: - 
state: present - name: VM with UEFI boot type - timezone: GMT - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - name: "{{ storage_container.name }}" - - type: DISK - size_gb: 2 - bus: PCI - storage_container: - name: "{{ storage_container.name }}" - - type: DISK - size_gb: 3 - bus: SATA - boot_config: - boot_type: UEFI - boot_order: - - DISK - - CDROM - - NETWORK - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result +- name: VM with different disk types and different sizes with UEFI boot type + ntnx_vms: + state: present + name: VM with UEFI boot type + timezone: GMT + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 + - type: DISK + size_gb: 1 + bus: SCSI + storage_container: + name: "{{ storage_container.name }}" + - type: DISK + size_gb: 2 + bus: PCI + storage_container: + name: "{{ storage_container.name }}" + - type: DISK + size_gb: 3 + bus: SATA + boot_config: + boot_type: UEFI + boot_order: + - DISK + - CDROM + - NETWORK + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result ################################################################################ - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with diffrent disk types and diffrent sizes with UEFI boot type ' - success_msg: ' VM with diffrent disk types and diffrent sizes with UEFI boot type created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with different disk types and different sizes with UEFI boot type " + success_msg: " VM with different 
disk types and different sizes with UEFI boot type created successfully " - - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' - - name: Delete all Created VM - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' +- name: Delete all Created VM + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" - - set_fact: - todelete: [] +- set_fact: + todelete: [] #################################################################################### - - name: VM with storage container - ntnx_vms: - state: present - name: VM with UEFI boot type - timezone: GMT - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - disks: - - type: DISK - size_gb: 1 - bus: SCSI - storage_container: - uuid: "{{ storage_container.uuid }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result +- name: VM with storage container + ntnx_vms: + state: present + name: VM with UEFI boot type + timezone: GMT + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: DISK + size_gb: 1 + bus: SCSI + storage_container: + uuid: "{{ storage_container.uuid }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result - - name: Creation Status - assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM withstorage container ' - success_msg: ' VM with storage container created successfully ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with storage container " + success_msg: " VM with storage container created successfully " - - set_fact: - 
todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' - when: result.response.status.state == 'COMPLETE' +- set_fact: + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + when: result.response.status.state == 'COMPLETE' #################################################################################### - - name: Delete all Created VMs - ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - register: result - loop: '{{ todelete }}' +- name: Delete all Created VMs + ntnx_vms: + state: absent + vm_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" diff --git a/tests/integration/targets/nutanix_vms/tasks/delete.yml b/tests/integration/targets/nutanix_vms/tasks/delete.yml index b1cf3046d..e78ab6416 100644 --- a/tests/integration/targets/nutanix_vms/tasks/delete.yml +++ b/tests/integration/targets/nutanix_vms/tasks/delete.yml @@ -1,20 +1,20 @@ --- -- name: VM with minimum requiremnts +- name: VM with minimum requirements ntnx_vms: - state: present - name: MinReqVM - cluster: - name: "{{ cluster.name }}" + state: present + name: MinReqVM + cluster: + name: "{{ cluster.name }}" register: result ignore_errors: true - name: Creation Status assert: - that: - - result.response is defined - - result.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + that: + - result.response is defined + - result.response.status.state == 'COMPLETE' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " - name: Delete VM ntnx_vms: diff --git a/tests/integration/targets/nutanix_vms/tasks/main.yml b/tests/integration/targets/nutanix_vms/tasks/main.yml index 1a9593038..34b67b801 100644 --- a/tests/integration/targets/nutanix_vms/tasks/main.yml +++ b/tests/integration/targets/nutanix_vms/tasks/main.yml @@ -1,14 +1,14 @@ --- - module_defaults: - 
group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ip }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create.yml" - - import_tasks: "negtaive_scenarios.yml" - - import_tasks: "delete.yml" - - import_tasks: "vm_operations.yml" - - import_tasks: "vm_update.yml" - - import_tasks: "negtaive_vm_update.yml" + - import_tasks: "create.yml" + - import_tasks: "negative_scenarios.yml" + - import_tasks: "delete.yml" + - import_tasks: "vm_operations.yml" + - import_tasks: "vm_update.yml" + - import_tasks: "negative_vm_update.yml" diff --git a/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml b/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml index f003d4459..66b6a9c8f 100644 --- a/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml +++ b/tests/integration/targets/nutanix_vms/tasks/negtaive_scenarios.yml @@ -1,309 +1,307 @@ - - debug: - msg: "Started Negative Creation Cases" +- debug: + msg: "Started Negative Creation Cases" - - name: Unknown project name - ntnx_vms: - state: present - name: Unknown project name - timezone: "UTC" - project: - name: project - cluster: - uuid: "{{ cluster.uuid }}" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown project name + ntnx_vms: + state: present + name: Unknown project name + timezone: "UTC" + project: + name: project + cluster: + uuid: "{{ cluster.uuid }}" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos }}" + bus: "SCSI" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.msg == "Failed generating VM Spec" - - result.failed == True - - result.failed is defined - - 
result.error == "Project project not found." - success_msg: ' Success: returned error as expected ' +- name: Creation Status + assert: + that: + - result.msg == "Failed generating VM Spec" + - result.failed == True + - result.failed is defined + - result.error == "Project project not found." + success_msg: " Success: returned error as expected " ############################################################# - - name: Check if error is produced when disk size is not given for storage container - check_mode: yes - ntnx_vms: - state: present - name: VM with storage container - timezone: GMT - cluster: - name: "{{ cluster.name }}" - categories: - AppType: - - Apache_Spark - disks: - - type: DISK - bus: SCSI - storage_container: - name: "{{ storage_container.name }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - register: result - ignore_errors: True +- name: Check if error is produced when disk size is not given for storage container + check_mode: yes + ntnx_vms: + state: present + name: VM with storage container + timezone: GMT + cluster: + name: "{{ cluster.name }}" + categories: + AppType: + - Apache_Spark + disks: + - type: DISK + bus: SCSI + storage_container: + name: "{{ storage_container.name }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.msg == "Unsupported operation: Unable to create disk, 'size_gb' is required for using storage container." - - result.failed == True - - result.failed is defined - success_msg: ' Success: returned error as expected ' +- name: Creation Status + assert: + that: + - result.msg == "Unsupported operation: Unable to create disk, 'size_gb' is required for using storage container." 
+ - result.failed == True + - result.failed is defined + success_msg: " Success: returned error as expected " ################################################################################## - - name: Unknown Cluster - ntnx_vms: - state: present - name: Unknown Cluster - timezone: "UTC" - cluster: - uuid: "auto_cluster_1aa888141361" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown Cluster + ntnx_vms: + state: present + name: Unknown Cluster + timezone: "UTC" + cluster: + uuid: "auto_cluster_1aa888141361" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos }}" + bus: "SCSI" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.response.state == 'ERROR' - - result.status_code == 422 - - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail Vm created successfully with unknown cluster ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed == True + - result.response.state == 'ERROR' + - result.status_code == 422 + - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" + success_msg: " Success: returned error as expected " + fail_msg: " Fail Vm created successfully with unknown cluster " ################################################################################ - - name: Unknown Cluster name - ntnx_vms: - state: present - name: Unknown Cluster - timezone: "UTC" - cluster: - name: "auto_cluster" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "{{ centos }}" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown Cluster name + ntnx_vms: + state: present + name: Unknown Cluster + timezone: "UTC" + cluster: + name: "auto_cluster" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "{{ centos 
}}" + bus: "SCSI" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.msg == "Failed generating VM Spec" - - result.failed == True - - result.response is defined - - result.error == "Cluster auto_cluster not found." - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail Vm created successfully with unknown cluster ' +- name: Creation Status + assert: + that: + - result.msg == "Failed generating VM Spec" + - result.failed == True + - result.response is defined + - result.error == "Cluster auto_cluster not found." + success_msg: " Success: returned error as expected " + fail_msg: " Fail Vm created successfully with unknown cluster " ################################################################################### - - name: Unknown Network name - ntnx_vms: - state: present - name: Unknown Network - desc: "Unknown network" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "vlan.8000" - register: result - ignore_errors: True +- name: Unknown Network name + ntnx_vms: + state: present + name: Unknown Network + desc: "Unknown network" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: True + subnet: + name: "vlan.8000" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.msg == "Failed generating VM Spec" - - result.error == "Subnet vlan.8000 not found." - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with unknown network name ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed == True + - result.msg == "Failed generating VM Spec" + - result.error == "Subnet vlan.8000 not found." 
+ success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with unknown network name " ################################################################################### - - name: Unknown Network uuid - ntnx_vms: - state: present - name: Unknown Network - desc: "Unknown network" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - uuid: "8000" - register: result - ignore_errors: True +- name: Unknown Network uuid + ntnx_vms: + state: present + name: Unknown Network + desc: "Unknown network" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: True + subnet: + uuid: "8000" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" - - result.response.state == 'ERROR' - - result.status_code == 422 - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with unknown network name ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed == True + - result.error == "HTTP Error 422: UNPROCESSABLE ENTITY" + - result.response.state == 'ERROR' + - result.status_code == 422 + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with unknown network name " ################################################################################### - - name: Unknow Image name - ntnx_vms: - state: present - name: unknown image_vm - timezone: "UTC" - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - size_gb: 10 - clone_image: - name: "centos-7-cloudinit" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown Image name + ntnx_vms: + state: present + name: unknown image_vm + timezone: "UTC" + cluster: + name: "{{ 
cluster.name }}" + disks: + - type: "DISK" + size_gb: 10 + clone_image: + name: "centos-7-cloudinit" + bus: "SCSI" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - - result.response.state == 'ERROR' - - result.status_code == 422 - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with not existed image ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed == True + - result.response.state == 'ERROR' + - result.status_code == 422 + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with not existed image " ######################################################################################## - - name: Wrong disk size value - ntnx_vms: - state: present - name: "Wrong disk size value" - timezone: "UTC" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "{{ network.dhcp.name }}" - disks: - - type: "DISK" - size_gb: 10g - bus: "PCI" - register: result - ignore_errors: True +- name: Wrong disk size value + ntnx_vms: + state: present + name: "Wrong disk size value" + timezone: "UTC" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: True + subnet: + name: "{{ network.dhcp.name }}" + disks: + - type: "DISK" + size_gb: 10g + bus: "PCI" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with invalid argument for size_gb ' +- name: Creation Status + assert: + that: + - result.failed == True + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with invalid argument for size_gb " ############################################################################################# - - name: Image size 
less than actual - ntnx_vms: - state: present - name: "image size less than actual" - categories: - AppType: - - "Apache_Spark" - cluster: - name: "{{ cluster.name }}" - networks: - - is_connected: True - subnet: - name: "{{ network.dhcp.name }}" - disks: - - type: "DISK" - size_gb: 2 #must be 20 - bus: "SATA" - clone_image: - name: "{{ centos }}" - vcpus: 1 - cores_per_vcpu: 1 - memory_gb: 1 - guest_customization: - type: "cloud_init" - script_path: "cloud_init.yml" - is_overridable: True - register: result - ignore_errors: True +- name: Image size less than actual + ntnx_vms: + state: present + name: "image size less than actual" + categories: + AppType: + - "Apache_Spark" + cluster: + name: "{{ cluster.name }}" + networks: + - is_connected: True + subnet: + name: "{{ network.dhcp.name }}" + disks: + - type: "DISK" + size_gb: 2 #must be 20 + bus: "SATA" + clone_image: + name: "{{ centos }}" + vcpus: 1 + cores_per_vcpu: 1 + memory_gb: 1 + guest_customization: + type: "cloud_init" + script_path: "cloud_init.yml" + is_overridable: True + register: result + ignore_errors: True - - - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail: VM created successfully with image size is less than actual ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed == True + success_msg: " Success: returned error as expected " + fail_msg: " Fail: VM created successfully with image size is less than actual " ################################################################################# - - name: Unknow storage container name - ntnx_vms: - state: present - name: unknown storage container - timezone: "UTC" - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - size_gb: 10 - storage_container: - name: "storage" - bus: "SCSI" - register: result - ignore_errors: True +- name: Unknown storage container name + ntnx_vms: 
+ state: present + name: unknown storage container + timezone: "UTC" + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + size_gb: 10 + storage_container: + name: "storage" + bus: "SCSI" + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail VM created successfully with unknown storage container name ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed == True + success_msg: " Success: returned error as expected " + fail_msg: " Fail VM created successfully with unknown storage container name " ################################################################################# - - name: Delete vm with unknown uuid - ntnx_vms: - state: absent - vm_uuid: 5 - register: result - ignore_errors: True +- name: Delete vm with unknown uuid + ntnx_vms: + state: absent + vm_uuid: 5 + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.response is defined - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting VM with unknown uuid ' +- name: Creation Status + assert: + that: + - result.response is defined + - result.failed == True + success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting VM with unknown uuid " ################################################################################# - - name: Delete vm with missing uuid - ntnx_vms: - state: absent - register: result - ignore_errors: True +- name: Delete vm with missing uuid + ntnx_vms: + state: absent + register: result + ignore_errors: True - - name: Creation Status - assert: - that: - - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail deleting VM with missing uuid ' +- name: Creation Status + assert: + that: + - result.failed == True + 
success_msg: " Success: returned error as expected " + fail_msg: " Fail deleting VM with missing uuid " diff --git a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml index a17b52529..d2c0481d3 100644 --- a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml +++ b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml @@ -1,4 +1,4 @@ -- name: create VM with minimum requiremnts to update +- name: create VM with minimum requirements to update ntnx_vms: state: present name: update vm @@ -43,8 +43,8 @@ - vm.response.status.state == 'COMPLETE' - vm.vm_uuid - vm.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " - name: update vm without change any value ntnx_vms: @@ -55,15 +55,14 @@ register: result ignore_errors: true - - name: Update Status assert: that: - - result.failed == false - - result.changed == false - - result.msg == 'Nothing to change' - fail_msg: 'Fail : VM updated successfully with same current values ' - success_msg: ' Success: returned error as expected ' + - result.failed == false + - result.changed == false + - result.msg == 'Nothing to change' + fail_msg: "Fail : VM updated successfully with same current values " + success_msg: " Success: returned error as expected " ############################################################### - debug: msg: Start negative update scenarios tests for memory vcpus cores_per_vcpu @@ -78,11 +77,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : decrease the value for vcpus while while vm is on ' - success_msg: ' Success: returned 
error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : decrease the value for vcpus while while vm is on " + success_msg: " Success: returned error as expected " - name: decrease values for memory_gb without force_power_off and vm is on ntnx_vms: @@ -94,11 +93,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : decrease the value for memory_gb while while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : decrease the value for memory_gb while while vm is on " + success_msg: " Success: returned error as expected " - name: decrease values for cores_per_vcpu without force_power_off and vm is on ntnx_vms: @@ -110,16 +109,16 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : decrease the value for cores_per_vcpu while while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : decrease the value for cores_per_vcpu while while vm is on " + success_msg: " Success: returned error as expected " ############################################################### - debug: msg: Start negative update scenarios tests for disks -############ negative test : Decrase size +############ negative test : Decrease size - name: Update VM by decreasing the 
size of the disk that contains the image with SCSI bus type ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" @@ -134,9 +133,8 @@ assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the disk that contains the image with SCSI bus type ' - success_msg: ' Success: returned error as expected ' - + fail_msg: " Fail: decreasing the size of the disk that contains the image with SCSI bus type " + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the SCSI disk with storage container ntnx_vms: @@ -152,8 +150,8 @@ assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the SCSI disk with storage container ' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the SCSI disk with storage container " + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the empty ide cdrom #error ntnx_vms: @@ -171,8 +169,8 @@ - result.msg == 'Unsupported operation: Cannot resize empty cdrom.' - result.changed == false - result.failed == true - fail_msg: ' Fail: change the size of the empty CDROM' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: change the size of the empty CDROM" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the pci disk ntnx_vms: @@ -188,8 +186,8 @@ assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the pci disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the pci disk" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the sata disk ntnx_vms: @@ -205,8 +203,8 @@ assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' 
- fail_msg: ' Fail: decreasing the size of the sata disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the sata disk" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the SCSI disk ntnx_vms: @@ -222,8 +220,8 @@ assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the SCSI disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the SCSI disk" + success_msg: " Success: returned error as expected " - name: Update VM by decreasing the size of the IDE disk ntnx_vms: @@ -239,8 +237,8 @@ assert: that: - result.msg == ' Unsupported operation: Unable to decrease disk size.' - fail_msg: ' Fail: decreasing the size of the IDE disk' - success_msg: ' Success: returned error as expected ' + fail_msg: " Fail: decreasing the size of the IDE disk" + success_msg: " Success: returned error as expected " ################ - name: Update VM by change ths bus type of ide disk ntnx_vms: @@ -257,8 +255,8 @@ that: - result.msg == ' parameters are mutually exclusive: uuid|bus found in disks ' - result.failed == True - success_msg: ' Success: returned error as expected ' - fail_msg: ' Fail: Update VM by change ths bus type of ide disk sucessfuly ' + success_msg: " Success: returned error as expected " + fail_msg: " Fail: Update VM by change ths bus type of ide disk successfully " ############ - name: Update VM by adding IDE disk while vm is on ntnx_vms: @@ -273,11 +271,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by add ide disk while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these 
changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by add ide disk while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by adding SATA disk while vm is on ntnx_vms: @@ -292,11 +290,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by add SATA disk while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by add SATA disk while vm is on " + success_msg: " Success: returned error as expected " ############# - name: Update VM by removing IDE disks while vm is on ntnx_vms: @@ -309,11 +307,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing IDE disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing IDE disks while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by removing IDE disks while vm is on ntnx_vms: @@ -327,11 +325,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing IDE disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - 
result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing IDE disks while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by removing PCI disks while vm is on ntnx_vms: @@ -345,11 +343,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing PCI disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing PCI disks while vm is on " + success_msg: " Success: returned error as expected " - name: Update VM by removing SATA disks while vm is on ntnx_vms: @@ -363,11 +361,11 @@ - name: Update Status assert: that: - - result.failed == True - - result.changed == false - - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" - fail_msg: 'Fail : update vm by by removing SATA disks while vm is on ' - success_msg: ' Success: returned error as expected ' + - result.failed == True + - result.changed == false + - result.msg == "To make these changes, the VM should be restarted, but 'force_power_off' is False" + fail_msg: "Fail : update vm by by removing SATA disks while vm is on " + success_msg: " Success: returned error as expected " ########################################################### - name: Delete created vm's ntnx_vms: @@ -382,5 +380,5 @@ - result.response.status == 'SUCCEEDED' - result.vm_uuid - result.task_uuid - fail_msg: 'Fail: Unable to delete created vm ' - success_msg: 'Success: Vm deleted sucessfully' + fail_msg: "Fail: Unable to delete created vm " + success_msg: "Success: 
Vm deleted successfully" diff --git a/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml b/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml index d64f9f755..38fb9edc6 100644 --- a/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml +++ b/tests/integration/targets/nutanix_vms/tasks/vm_operations.yml @@ -1,20 +1,20 @@ - debug: - msg: Start testing VM with different opperations + msg: Start testing VM with different operations - set_fact: - todelete: [] -- name: VM with minimum requiremnts + todelete: [] +- name: VM with minimum requirements ntnx_vms: - state: present - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 + state: present + name: integration_test_operations_vm + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 register: vm ignore_errors: true @@ -23,22 +23,22 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " ############################################ -- name: VM with minimum requiremnts with check mode +- name: VM with minimum requirements with check mode ntnx_vms: - state: present - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" - disks: - - type: "DISK" - clone_image: - name: "{{ ubuntu }}" - bus: "SCSI" - size_gb: 20 + state: present + name: integration_test_operations_vm + cluster: + name: "{{ cluster.name }}" + disks: + - type: "DISK" + clone_image: + name: "{{ ubuntu }}" + bus: "SCSI" + size_gb: 20 register: result ignore_errors: true check_mode: yes @@ -50,13 +50,13 @@ - result.changed == false - 
result.failed == false - result.task_uuid != "" - success_msg: ' Success: returned as expected ' - fail_msg: ' Fail ' + success_msg: " Success: returned as expected " + fail_msg: " Fail " ########################################### - name: hard power off the vm ntnx_vms: - vm_uuid: "{{ vm.vm_uuid }}" - state: hard_poweroff + vm_uuid: "{{ vm.vm_uuid }}" + state: hard_poweroff register: result ignore_errors: true @@ -66,13 +66,13 @@ - result.response is defined - result.response.status.state == 'COMPLETE' - result.response.status.resources.power_state == 'OFF' - fail_msg: ' Unable to hard power off the vm ' - success_msg: ' VM powerd off successfully ' + fail_msg: " Unable to hard power off the vm " + success_msg: " VM was powered off successfully " # ########################################### - name: power on the vm ntnx_vms: - state: power_on - vm_uuid: "{{ vm.vm_uuid }}" + state: power_on + vm_uuid: "{{ vm.vm_uuid }}" register: result ignore_errors: true @@ -82,13 +82,13 @@ - result.response is defined - result.response.status.state == 'COMPLETE' - result.response.status.resources.power_state == 'ON' - fail_msg: ' Unable to power on vm ' - success_msg: ' VM powerd on successfully ' + fail_msg: " Unable to power on vm " + success_msg: " VM was powered on successfully " ########################################## - name: power on the vm while it's on ntnx_vms: - state: power_on - vm_uuid: "{{ vm.vm_uuid }}" + state: power_on + vm_uuid: "{{ vm.vm_uuid }}" register: result ignore_errors: true @@ -96,8 +96,8 @@ assert: that: - result.msg == "Nothing to change" - success_msg: ' Success: returned msg as expected ' - fail_msg: ' Fail ' + success_msg: " Success: returned msg as expected " + fail_msg: " Fail " ########################################## # - name: soft shut down the vm # ntnx_vms: @@ -116,10 +116,10 @@ # success_msg: ' VM soft_shutdown successfully ' ########################################### ############################### -# - name: VM with minimum 
requiremnts and soft_shutdown +# - name: VM with minimum requirements and soft_shutdown # ntnx_vms: # state: present -# name: integration_test_opperations_vm +# name: integration_test_operations_vm # operation: soft_shutdown # cluster: # name: "{{ cluster.name }}" @@ -139,18 +139,18 @@ # - result.response.status.state == 'COMPLETE' # - result.response.status.resources.power_state == 'OFF' # - result.response.status.resources.power_state_mechanism.mechanism == 'ACPI' -# fail_msg: ' Unable to create VM with minimum requiremnts and soft_shutdown ' -# success_msg: ' VM with minimum requiremnts created successfully and soft_shutdown ' +# fail_msg: ' Unable to create VM with minimum requirements and soft_shutdown ' +# success_msg: ' VM with minimum requirements created successfully and soft_shutdown ' # - set_fact: # todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' -- name: Create VM with minimum requiremnts with hard_poweroff opperation +- name: Create VM with minimum requirements with hard_poweroff operation ntnx_vms: - state: hard_poweroff - name: integration_test_opperations_vm - cluster: - name: "{{ cluster.name }}" + state: hard_poweroff + name: integration_test_operations_vm + cluster: + name: "{{ cluster.name }}" register: result ignore_errors: true @@ -161,19 +161,19 @@ - result.response.status.state == 'COMPLETE' - result.response.status.resources.power_state == 'OFF' - result.response.status.resources.power_state_mechanism.mechanism == 'HARD' - fail_msg: ' Unable to create VM with minimum requiremnts with hard_poweroff opperation ' - success_msg: ' VM with minimum requiremnts and hard_poweroff state created successfully ' + fail_msg: " Unable to create VM with minimum requirements with hard_poweroff operation " + success_msg: " VM with minimum requirements and hard_poweroff state created successfully " - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + todelete: '{{ todelete + [ 
result["response"]["metadata"]["uuid"] ] }}' -- name: Create VM with minimum requiremnts with hard_poweroff opperation without wait +- name: Create VM with minimum requirements with hard_poweroff operation without wait ntnx_vms: - state: hard_poweroff - name: integration_test_opperations_vm_111 - cluster: - name: "{{ cluster.name }}" - wait: false + state: hard_poweroff + name: integration_test_operations_vm_111 + cluster: + name: "{{ cluster.name }}" + wait: false register: result ignore_errors: true @@ -184,23 +184,23 @@ - result.response.status.state == 'COMPLETE' or result.response.status.state == 'PENDING' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts with hard_poweroff opperation ' - success_msg: ' VM with minimum requiremnts and hard_poweroff state created successfully ' + fail_msg: " Unable to create VM with minimum requirements with hard_poweroff operation " + success_msg: " VM with minimum requirements and hard_poweroff state created successfully " - set_fact: - todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' + todelete: '{{ todelete + [ result["response"]["metadata"]["uuid"] ] }}' when: result.response.status.state == 'COMPLETE' - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ item }}' - loop: '{{ todelete }}' + state: absent + vm_uuid: "{{ item }}" + loop: "{{ todelete }}" - name: Delete all Created VMs ntnx_vms: - state: absent - vm_uuid: '{{ vm.vm_uuid }}' + state: absent + vm_uuid: "{{ vm.vm_uuid }}" - set_fact: - todelete: [] + todelete: [] diff --git a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml index fa0ccd60e..df504b7bd 100644 --- a/tests/integration/targets/nutanix_vms/tasks/vm_update.yml +++ b/tests/integration/targets/nutanix_vms/tasks/vm_update.yml @@ -1,14 +1,14 @@ # ########################### UPDATE_VM ################################ -- name: create VM with minimum 
requiremnts to update +- name: create VM with minimum requirements to update ntnx_vms: state: present name: update vm cluster: name: "{{ cluster.name }}" categories: - Environment: - - Production + Environment: + - Production vcpus: 5 cores_per_vcpu: 5 memory_gb: 5 @@ -23,8 +23,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " #################################################################### - name: update vm by set owner by uuid ntnx_vms: @@ -44,8 +44,8 @@ - result.response.metadata.owner_reference.name == "{{ vm_owner.name }}" - result.response.metadata.owner_reference.uuid == "{{ vm_owner.uuid }}" - result.response.metadata.owner_reference.kind == "user" - fail_msg: ' Unable to update vm by setting owner ' - success_msg: ' VM updated successfully by setting owner ' + fail_msg: " Unable to update vm by setting owner " + success_msg: " VM updated successfully by setting owner " #################################################################### - debug: @@ -69,8 +69,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by decrease the values for memory, vcpus and corespervcpu with force_power_off ' - success_msg: ' VM updated successfully by decrease the values for memory, vcpus and corespervcpu with force_power_off ' + fail_msg: " Unable to update vm by decrease the values for memory, vcpus and corespervcpu with force_power_off " + success_msg: " VM updated successfully by decrease the values for memory, vcpus and corespervcpu with force_power_off " - name: increase values for memory, vcpus and corespervcpu ntnx_vms: @@ -87,8 +87,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == 
"COMPLETE" - fail_msg: ' Unable to update vm by increase values for memory, vcpus ' - success_msg: ' VM updated successfully by increase values for memory, vcpus ' + fail_msg: " Unable to update vm by increase values for memory, vcpus " + success_msg: " VM updated successfully by increase values for memory, vcpus " - name: increase values for corespervcpu with force_power_off ntnx_vms: @@ -105,8 +105,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increase values for corespervcpu with force_power_off' - success_msg: ' VM updated successfully by increase values for corespervcpu with force_power_off ' + fail_msg: " Unable to update vm by increase values for corespervcpu with force_power_off" + success_msg: " VM updated successfully by increase values for corespervcpu with force_power_off " #################################################################### - debug: @@ -116,10 +116,10 @@ ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" categories: - Environment: - - Dev - AppType: - - Default + Environment: + - Dev + AppType: + - Default register: result ignore_errors: true @@ -132,10 +132,10 @@ - result.response.status.state == "COMPLETE" - result.response.metadata.categories_mapping["Environment"] == ["Dev"] - result.response.metadata.categories_mapping["AppType"] == ["Default"] - fail_msg: ' Unable to update categories attached to vm' - success_msg: ' VM categories updated successfully ' + fail_msg: " Unable to update categories attached to vm" + success_msg: " VM categories updated successfully " -- name: remove all categoies attached to vm +- name: remove all categories attached to vm ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" remove_categories: true @@ -150,13 +150,13 @@ - result.task_uuid - result.response.status.state == "COMPLETE" - result.response.metadata.categories_mapping == {} - fail_msg: ' Unable to remove all categories attached to vm' - success_msg: ' All VM categories removed 
successfully ' + fail_msg: " Unable to remove all categories attached to vm" + success_msg: " All VM categories removed successfully " ################################################################### - debug: msg: Start update tests for disks -##### CRUD opperation for SCSI disks +##### CRUD operation for SCSI disks - name: Update VM by adding SCSI disks ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" @@ -184,8 +184,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding SCSI disks ' - success_msg: ' VM updated successfully by adding SCSI disks ' + fail_msg: " Unable to update vm by adding SCSI disks " + success_msg: " VM updated successfully by adding SCSI disks " - name: Update VM by increasing the size of the SCSI disks ntnx_vms: @@ -210,8 +210,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the SCSI disks ' - success_msg: ' VM updated successfully by increasing the size of the SCSI disks ' + fail_msg: " Unable to update vm by increasing the size of the SCSI disks " + success_msg: " VM updated successfully by increasing the size of the SCSI disks " - name: Update VM by removing SCSI disks ntnx_vms: @@ -233,10 +233,10 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing SCSI disks ' - success_msg: ' VM updated successfully by removing SCSI disks ' + fail_msg: " Unable to update vm by removing SCSI disks " + success_msg: " VM updated successfully by removing SCSI disks " ####### -##### CRUD opperation for PCI disks +##### CRUD operation for PCI disks - name: Update VM by adding PCI disks ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" @@ -254,8 +254,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding PCI disks ' - success_msg: ' VM updated successfully 
by adding PCI disks ' + fail_msg: " Unable to update vm by adding PCI disks " + success_msg: " VM updated successfully by adding PCI disks " - name: Update VM by increasing the size of the PCI disks ntnx_vms: @@ -274,8 +274,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the PCI disks ' - success_msg: ' VM updated successfully by increasing the size of the PCI disks ' + fail_msg: " Unable to update vm by increasing the size of the PCI disks " + success_msg: " VM updated successfully by increasing the size of the PCI disks " - name: Update VM by removing PCI disks with force_power_off ntnx_vms: @@ -294,9 +294,9 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing PCI disks with force_power_off ' - success_msg: ' VM updated successfully by removing PCI disks with force_power_off ' -##### CRUD opperation for IDE disks + fail_msg: " Unable to update vm by removing PCI disks with force_power_off " + success_msg: " VM updated successfully by removing PCI disks with force_power_off " +##### CRUD operation for IDE disks - name: Update VM by adding IDE disks with force_power_off ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" @@ -318,8 +318,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding IDE disks with force_power_off ' - success_msg: ' VM updated successfully by adding IDE disks with force_power_off ' + fail_msg: " Unable to update vm by adding IDE disks with force_power_off " + success_msg: " VM updated successfully by adding IDE disks with force_power_off " - name: Update VM by increasing the size of the IDE disks with force_power_off ntnx_vms: @@ -339,8 +339,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the IDE disks with 
force_power_off ' - success_msg: ' VM updated successfully by increasing the size of the IDE disks with force_power_off ' + fail_msg: " Unable to update vm by increasing the size of the IDE disks with force_power_off " + success_msg: " VM updated successfully by increasing the size of the IDE disks with force_power_off " - name: Update VM by removing IDE disks with force_power_off ntnx_vms: @@ -361,10 +361,10 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing IDE disks with force_power_off' - success_msg: ' VM updated successfully by removing IDE disks with force_power_off' + fail_msg: " Unable to update vm by removing IDE disks with force_power_off" + success_msg: " VM updated successfully by removing IDE disks with force_power_off" ####### -##### CRUD opperation for SATA disks +##### CRUD operation for SATA disks - name: Update VM by adding SATA disks with force_power_off ntnx_vms: vm_uuid: "{{ result.vm_uuid }}" @@ -383,8 +383,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding SATA disks with force_power_off' - success_msg: ' VM updated successfully by adding SATA disks with force_power_off' + fail_msg: " Unable to update vm by adding SATA disks with force_power_off" + success_msg: " VM updated successfully by adding SATA disks with force_power_off" - name: Update VM by increasing the size of the SATA disks with force_power_off ntnx_vms: @@ -404,8 +404,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by increasing the size of the SATA disks with force_power_off ' - success_msg: ' VM updated successfully by increasing the size of the SATA disks with force_power_off' + fail_msg: " Unable to update vm by increasing the size of the SATA disks with force_power_off " + success_msg: " VM updated successfully by increasing the size of the SATA 
disks with force_power_off" - name: Update VM by removing SATA disks with force_power_off ntnx_vms: @@ -424,8 +424,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by removing SATA disks with force_power_off' - success_msg: ' VM updated successfully by removing SATA disks with force_power_off ' + fail_msg: " Unable to update vm by removing SATA disks with force_power_off" + success_msg: " VM updated successfully by removing SATA disks with force_power_off " # #################################################################### - debug: @@ -452,8 +452,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by adding subnets ' - success_msg: ' VM updated successfully by adding subnets' + fail_msg: " Unable to update vm by adding subnets " + success_msg: " VM updated successfully by adding subnets" - name: Update VM by editing a subnet is_connected ntnx_vms: @@ -474,8 +474,8 @@ - result.vm_uuid - result.task_uuid - result.response.status.state == "COMPLETE" - fail_msg: ' Unable to update vm by editing a subnet ' - success_msg: ' VM updated successfully by editing a subnet ' + fail_msg: " Unable to update vm by editing a subnet " + success_msg: " VM updated successfully by editing a subnet " - name: Update VM by change the private ip for subnet ntnx_vms: @@ -496,8 +496,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by editing private_ip for subnet ' - success_msg: ' VM updated successfully by editing private_ip for subnet' + fail_msg: " Unable to update vm by editing private_ip for subnet " + success_msg: " VM updated successfully by editing private_ip for subnet" - name: Update VM by change vlan subnet ntnx_vms: @@ -522,8 +522,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by editing a subnet 
vlan ' - success_msg: ' VM updated successfully by editing a subnet vlan ' + fail_msg: " Unable to update vm by editing a subnet vlan " + success_msg: " VM updated successfully by editing a subnet vlan " - name: Update VM by deleting a subnet ntnx_vms: @@ -543,8 +543,8 @@ - result.response.status.state == 'COMPLETE' - result.vm_uuid - result.task_uuid - fail_msg: ' Unable to update vm by deleting a subnet ' - success_msg: ' VM updated successfully by deleting a subnet ' + fail_msg: " Unable to update vm by deleting a subnet " + success_msg: " VM updated successfully by deleting a subnet " # #################################################################### - name: Update VM by deleting it @@ -560,5 +560,5 @@ - result.response.status == 'SUCCEEDED' - result.vm_uuid - result.task_uuid - fail_msg: 'Fail: Unable to delete created vm ' - success_msg: 'Success: Vm deleted sucessfully' + fail_msg: "Fail: Unable to delete created vm " + success_msg: "Success: Vm deleted successfully" diff --git a/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml b/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml index 93b21f003..fcf5874c4 100644 --- a/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml +++ b/tests/integration/targets/nutanix_vms_info/tasks/list_vms.yml @@ -1,7 +1,7 @@ - set_fact: todelete: [] -- name: Creat anohter VM with same name +- name: Create another VM with same name ntnx_vms: name: "{{ vm.name }}" cluster: @@ -15,8 +15,8 @@ that: - output.response is defined - output.response.status.state == 'COMPLETE' - fail_msg: ' Unable to create VM with minimum requiremnts ' - success_msg: ' VM with minimum requiremnts created successfully ' + fail_msg: " Unable to create VM with minimum requirements " + success_msg: " VM with minimum requirements created successfully " - set_fact: todelete: '{{ todelete + [ output["response"]["metadata"]["uuid"] ] }}' @@ -46,7 +46,6 @@ register: result ignore_errors: True - - name: Listing Status assert: 
that: @@ -87,8 +86,8 @@ - name: Delete all Created VMs ntnx_vms: state: absent - vm_uuid: '{{ item }}' + vm_uuid: "{{ item }}" register: result - loop: '{{ todelete }}' + loop: "{{ todelete }}" - set_fact: todelete: [] diff --git a/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml b/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml index 3cc3113d4..1061ab9bf 100644 --- a/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml +++ b/tests/integration/targets/nutanix_vpcs/tasks/create_vpcs.yml @@ -48,7 +48,7 @@ name: vpc_with_routable_ips routable_ips: - network_ip: "{{ routable_ips.network_ip }}" - network_prefix: "{{ routable_ips.network_prefix }}" + network_prefix: "{{ routable_ips.network_prefix }}" register: result ignore_errors: True @@ -71,7 +71,7 @@ - subnet_name: "{{ external_nat_subnet.name }}" routable_ips: - network_ip: "{{ routable_ips.network_ip_2 }}" - network_prefix: "{{ routable_ips.network_prefix_2 }}" + network_prefix: "{{ routable_ips.network_prefix_2 }}" register: result ignore_errors: True @@ -95,7 +95,6 @@ register: result ignore_errors: True - - set_fact: todelete: "{{ todelete + [ result.vpc_uuid ] }}" ########################################################## @@ -110,16 +109,16 @@ - set_fact: todelete: [] ########################################################## -- name: Create VPC with all specfactions +- name: Create VPC with all specifications ntnx_vpcs: state: present - name: vpc_with_add_specfactions + name: vpc_with_add_specifications external_subnets: - subnet_name: "{{ external_nat_subnet.name }}" dns_servers: "{{ dns_servers }}" routable_ips: - network_ip: "{{ routable_ips.network_ip }}" - network_prefix: "{{ routable_ips.network_prefix }}" + network_prefix: "{{ routable_ips.network_prefix }}" register: result ignore_errors: True @@ -128,8 +127,8 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: " Unable to create vpc all specfactions " - success_msg: " VPC 
with all specfactions created successfully " + fail_msg: " Unable to create vpc all specifications " + success_msg: " VPC with all specifications created successfully " - set_fact: todelete: "{{ todelete + [ result.vpc_uuid ] }}" diff --git a/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml b/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml index 7d0339fa6..977cce555 100644 --- a/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml +++ b/tests/integration/targets/nutanix_vpcs/tasks/delete_vpc.yml @@ -1,14 +1,14 @@ --- -- name: Create VPC with all specfactions +- name: Create VPC with all specifications ntnx_vpcs: state: present - name: vpc_with_add_specfactions + name: vpc_with_add_specifications external_subnets: - subnet_name: "{{ external_nat_subnet.name }}" dns_servers: "{{ dns_servers }}" routable_ips: - network_ip: "{{ routable_ips.network_ip }}" - network_prefix: "{{ routable_ips.network_prefix }}" + network_prefix: "{{ routable_ips.network_prefix }}" register: result ignore_errors: True @@ -17,9 +17,8 @@ that: - result.response is defined - result.response.status.state == 'COMPLETE' - fail_msg: " Unable to create vpc all specfactions " - success_msg: " VPC with all specfactions created successfully " - + fail_msg: " Unable to create vpc all specifications " + success_msg: " VPC with all specifications created successfully " - name: Delete vpc ntnx_vpcs: diff --git a/tests/integration/targets/prepare_ndb_env/vars/main.yml b/tests/integration/targets/prepare_ndb_env/vars/main.yml index bc3e181b2..5f1af010c 100644 --- a/tests/integration/targets/prepare_ndb_env/vars/main.yml +++ b/tests/integration/targets/prepare_ndb_env/vars/main.yml @@ -129,7 +129,7 @@ cluster_ips: vm_password: "TEST_VM_PASSWORD" vm_username: "TEST_VM_USERNAME" -# exitsing db server VM reference for software profile tests +# existing db server VM reference for software profile tests db_server_vm: name: "TEST_DB_SERVER_VM_NAME" uuid: "TEST_DB_SERVER_VM_UUID" @@ 
-181,4 +181,4 @@ ndb_vlan: updated_primary_dns: "TEST_UPDATED_PRIMARY_DNS" updated_secondary_dns: "TEST_UPDATED_SECONDARY_DNS" -todelete: [] \ No newline at end of file +todelete: [] From 55a03736b63a5aeff58fcd5e9eea84104e38e654 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Thu, 26 Sep 2024 16:24:54 +0300 Subject: [PATCH 2/9] Adding more spelling fixes --- CHANGELOG.rst | 10 ++++----- changelogs/changelog.yaml | 10 ++++----- plugins/doc_fragments/ntnx_vms_base.py | 2 +- plugins/module_utils/entity.py | 6 ++--- .../ndb/database_engines/db_engine_factory.py | 2 +- .../ndb/database_engines/postgres.py | 4 ++-- .../module_utils/ndb/database_instances.py | 2 +- plugins/module_utils/ndb/db_server_vm.py | 4 ++-- .../ndb/profiles/profile_types.py | 10 ++++----- plugins/module_utils/prism/acps.py | 2 +- .../prism/idempotence_identifiers.py | 4 ++-- .../module_utils/prism/projects_internal.py | 22 +++++++++---------- .../module_utils/prism/protection_rules.py | 2 +- plugins/modules/ntnx_acps.py | 2 +- plugins/modules/ntnx_categories.py | 4 ++-- plugins/modules/ntnx_clusters_info.py | 2 +- plugins/modules/ntnx_floating_ips.py | 4 ++-- plugins/modules/ntnx_floating_ips_info.py | 4 ++-- plugins/modules/ntnx_foundation.py | 4 ++-- plugins/modules/ntnx_foundation_central.py | 2 +- ...foundation_central_imaged_clusters_info.py | 2 +- ...nx_foundation_central_imaged_nodes_info.py | 2 +- .../ntnx_foundation_node_network_info.py | 2 +- plugins/modules/ntnx_images.py | 4 ++-- plugins/modules/ntnx_karbon_clusters_info.py | 2 +- .../ntnx_ndb_authorize_db_server_vms.py | 2 +- plugins/modules/ntnx_ndb_database_clones.py | 4 ++-- plugins/modules/ntnx_ndb_databases.py | 6 ++--- plugins/modules/ntnx_ndb_profiles.py | 6 ++--- plugins/modules/ntnx_ndb_register_database.py | 4 ++-- .../modules/ntnx_ndb_time_machines_info.py | 4 ++-- plugins/modules/ntnx_ndb_vlans.py | 8 +++---- plugins/modules/ntnx_pbrs.py | 6 ++--- plugins/modules/ntnx_projects.py | 4 ++-- 
plugins/modules/ntnx_recovery_plan_jobs.py | 6 ++--- plugins/modules/ntnx_roles.py | 2 +- plugins/modules/ntnx_security_rules.py | 2 +- plugins/modules/ntnx_service_groups.py | 2 +- plugins/modules/ntnx_user_groups.py | 2 +- plugins/modules/ntnx_user_groups_info.py | 2 +- plugins/modules/ntnx_users.py | 2 +- scripts/codegen.py | 4 ++-- 42 files changed, 89 insertions(+), 91 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 578a0c76b..58e7d6113 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -173,7 +173,7 @@ Bugfixes New Modules ----------- -- ntnx_acps - acp module which suports acp Create, update and delete operations +- ntnx_acps - acp module which supports acp Create, update and delete operations - ntnx_acps_info - acp info module - ntnx_address_groups - module which supports address groups CRUD operations - ntnx_address_groups_info - address groups info module @@ -186,7 +186,7 @@ New Modules - ntnx_projects_info - projects info module - ntnx_roles - module which supports role CRUD operations - ntnx_roles_info - role info module -- ntnx_service_groups - service_groups module which suports service_groups CRUD operations +- ntnx_service_groups - service_groups module which supports service_groups CRUD operations - ntnx_service_groups_info - service_group info module - ntnx_user_groups - user_groups module which supports pc user_groups management create delete operations - ntnx_user_groups_info - User Groups info module @@ -203,7 +203,7 @@ New Modules - ntnx_image_placement_policy - image placement policy module which supports Create, update and delete operations - ntnx_images - images module which supports pc images management CRUD operations - ntnx_images_info - images info module -- ntnx_security_rules - security_rule module which suports security_rule CRUD operations +- ntnx_security_rules - security_rule module which supports security_rule CRUD operations - ntnx_security_rules_info - security_rule info module - ntnx_static_routes - vpc static 
routes - ntnx_static_routes_info - vpc static routes info module @@ -243,8 +243,8 @@ New Modules - ntnx_foundation_central - Nutanix module to imaged Nodes and optionally create cluster - ntnx_foundation_central_api_keys - Nutanix module which creates api key for foundation central - ntnx_foundation_central_api_keys_info - Nutanix module which returns the api key -- ntnx_foundation_central_imaged_clusters_info - Nutanix module which returns the imaged clusters within the Foudation Central -- ntnx_foundation_central_imaged_nodes_info - Nutanix module which returns the imaged nodes within the Foudation Central +- ntnx_foundation_central_imaged_clusters_info - Nutanix module which returns the imaged clusters within the Foundation Central +- ntnx_foundation_central_imaged_nodes_info - Nutanix module which returns the imaged nodes within the Foundation Central - ntnx_foundation_discover_nodes_info - Nutanix module which returns nodes discovered by Foundation - ntnx_foundation_hypervisor_images_info - Nutanix module which returns the hypervisor images uploaded to Foundation - ntnx_foundation_image_upload - Nutanix module which uploads hypervisor or AOS image to foundation vm. 
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 1b0d077bc..b0ff11f68 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -101,12 +101,12 @@ releases: name: ntnx_foundation_central_api_keys_info namespace: "" - description: - Nutanix module which returns the imaged clusters within the Foudation + Nutanix module which returns the imaged clusters within the Foundation Central name: ntnx_foundation_central_imaged_clusters_info namespace: "" - description: - Nutanix module which returns the imaged nodes within the Foudation + Nutanix module which returns the imaged nodes within the Foundation Central name: ntnx_foundation_central_imaged_nodes_info namespace: "" @@ -154,7 +154,7 @@ releases: - description: images info module name: ntnx_images_info namespace: "" - - description: security_rule module which suports security_rule CRUD operations + - description: security_rule module which supports security_rule CRUD operations name: ntnx_security_rules namespace: "" - description: security_rule info module @@ -172,7 +172,7 @@ releases: bugfixes: - Fix examples of info modules [\#226](https://github.com/nutanix/nutanix.ansible/issues/226) modules: - - description: acp module which suports acp Create, update and delete operations + - description: acp module which supports acp Create, update and delete operations name: ntnx_acps namespace: "" - description: acp info module @@ -211,7 +211,7 @@ releases: - description: role info module name: ntnx_roles_info namespace: "" - - description: service_groups module which suports service_groups CRUD operations + - description: service_groups module which supports service_groups CRUD operations name: ntnx_service_groups namespace: "" - description: service_group info module diff --git a/plugins/doc_fragments/ntnx_vms_base.py b/plugins/doc_fragments/ntnx_vms_base.py index 635168013..c2586dbdd 100644 --- a/plugins/doc_fragments/ntnx_vms_base.py +++ b/plugins/doc_fragments/ntnx_vms_base.py @@ 
-72,7 +72,7 @@ class ModuleDocFragment(object): - absent subnet: description: - - Name or UUID of the subnet to which the VM should be connnected + - Name or UUID of the subnet to which the VM should be connected type: dict suboptions: name: diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index 953b7828d..a2d8d97cd 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -344,7 +344,7 @@ def _build_url_with_query(self, url, query): def _fetch_url( self, url, method, data=None, raise_error=True, no_response=False, timeout=30 ): - # only jsonify if content-type supports, added to avoid incase of form-url-encodeded type data + # only jsonify if content-type supports, added to avoid incase of form-url-encoded type data if self.headers["Content-Type"] == "application/json" and data is not None: data = self.module.jsonify(data) @@ -362,7 +362,7 @@ def _fetch_url( body = None - # buffer size with ref. to max read size of http.client.HTTPResponse.read() defination + # buffer size with ref. 
to max read size of http.client.HTTPResponse.read() definition buffer_size = 65536 # From ansible-core>=2.13, incase of http error, urllib.HTTPError object is returned in resp @@ -500,7 +500,7 @@ def _filter_entities(entities, custom_filters): return filtered_entities -# Read files in chunks and yeild it +# Read files in chunks and yield it class CreateChunks(object): def __init__(self, filename, chunk_size=1 << 13): self.filename = filename diff --git a/plugins/module_utils/ndb/database_engines/db_engine_factory.py b/plugins/module_utils/ndb/database_engines/db_engine_factory.py index 1467b42ef..40f69f092 100644 --- a/plugins/module_utils/ndb/database_engines/db_engine_factory.py +++ b/plugins/module_utils/ndb/database_engines/db_engine_factory.py @@ -20,7 +20,7 @@ def get_engine_type(module): if type in module.params: return type, None - return None, "Input doesn't conatains config for allowed engine types of databases" + return None, "Input doesn't contains config for allowed engine types of databases" def create_db_engine(module, engine_type=None, db_architecture=None): diff --git a/plugins/module_utils/ndb/database_engines/postgres.py b/plugins/module_utils/ndb/database_engines/postgres.py index 223cad085..b9c29be67 100644 --- a/plugins/module_utils/ndb/database_engines/postgres.py +++ b/plugins/module_utils/ndb/database_engines/postgres.py @@ -115,7 +115,7 @@ def build_spec_db_instance_provision_action_arguments(self, payload, config): spec = {"name": key, "value": config.get(key, value)} action_arguments.append(spec) - # handle scenariors where display names are diff + # handle scenarios where display names are diff action_arguments.append( {"name": "database_names", "value": config.get("db_name")} ) @@ -212,7 +212,7 @@ def build_spec_db_instance_provision_action_arguments(self, payload, config): spec = {"name": key, "value": config.get(key, default)} action_arguments.append(spec) - # handle scenariors where display names are different + # handle scenarios 
where display names are different action_arguments.append( {"name": "database_names", "value": config.get("db_name")} ) diff --git a/plugins/module_utils/ndb/database_instances.py b/plugins/module_utils/ndb/database_instances.py index 77bacde4a..1f5208afd 100644 --- a/plugins/module_utils/ndb/database_instances.py +++ b/plugins/module_utils/ndb/database_instances.py @@ -281,7 +281,7 @@ def get_engine_type(self): return ( None, - "Input doesn't conatains config for allowed engine types of databases", + "Input doesn't contains config for allowed engine types of databases", ) def get_db_engine_spec(self, payload, params=None, **kwargs): diff --git a/plugins/module_utils/ndb/db_server_vm.py b/plugins/module_utils/ndb/db_server_vm.py index dee8548ba..670a2f4da 100644 --- a/plugins/module_utils/ndb/db_server_vm.py +++ b/plugins/module_utils/ndb/db_server_vm.py @@ -451,7 +451,7 @@ def get_spec_registered_vm_for_db_instance_registration( if not vm_info.get("ipAddresses", []): return None, "No IP address found for given db server vm" - # picking first IP of db server vm for registraion + # picking first IP of db server vm for registration payload["vmIp"] = vm_info["ipAddresses"][0] elif params.get("ip"): @@ -572,7 +572,7 @@ def build_spec_software_profile(self, payload, profile): return payload, None def build_spec_network_profile(self, payload, profile): - # set network prfile + # set network profile network_profile = NetworkProfile(self.module) uuid, err = network_profile.get_profile_uuid(profile) if err: diff --git a/plugins/module_utils/ndb/profiles/profile_types.py b/plugins/module_utils/ndb/profiles/profile_types.py index 1aadd8e54..f651d9904 100644 --- a/plugins/module_utils/ndb/profiles/profile_types.py +++ b/plugins/module_utils/ndb/profiles/profile_types.py @@ -233,7 +233,7 @@ def _build_spec_multi_networks(self, payload, vlans): cluster_name = clusters_uuid_name_map[cluster_uuid] if not cluster_name: - return None, "Pleae provide uuid or name for getting cluster 
info" + return None, "Please provide uuid or name for getting cluster info" properties_map["CLUSTER_NAME_" + str(i)] = cluster_name properties_map["CLUSTER_ID_" + str(i)] = clusters_name_uuid_map[ @@ -252,7 +252,7 @@ def get_create_profile_spec(self, old_spec=None, params=None, **kwargs): self.build_spec_methods.update( { "software": self._build_spec_profile, - "clusters": self._build_spec_clusters_availibilty, + "clusters": self._build_spec_clusters_availability, } ) payload, err = super().get_create_profile_spec( @@ -269,7 +269,7 @@ def get_create_profile_spec(self, old_spec=None, params=None, **kwargs): def get_update_profile_spec(self, old_spec=None, params=None, **kwargs): self.build_spec_methods.update( - {"clusters": self._build_spec_clusters_availibilty} + {"clusters": self._build_spec_clusters_availability} ) payload, err = super().get_update_profile_spec(old_spec, params, **kwargs) if err: @@ -374,7 +374,7 @@ def _build_spec_version_create_properties( payload["properties"] = properties return payload, None - def _build_spec_clusters_availibilty(self, payload, clusters): + def _build_spec_clusters_availability(self, payload, clusters): _clusters = Cluster(self.module) spec = [] clusters_name_uuid_map = _clusters.get_all_clusters_name_uuid_map() @@ -485,7 +485,7 @@ def get_profile_type(module): if type in module.params: return type, None - return None, "Input doesn't conatains config for allowed profile types of databases" + return None, "Input doesn't contains config for allowed profile types of databases" def get_profile_type_obj(module, profile_type=None): # -> tuple[Profile, str]: diff --git a/plugins/module_utils/prism/acps.py b/plugins/module_utils/prism/acps.py index 5ae0fe327..dd5f6d800 100644 --- a/plugins/module_utils/prism/acps.py +++ b/plugins/module_utils/prism/acps.py @@ -135,7 +135,7 @@ def build_role_permissions_based_context(self, role_uuid): if permission.get("name"): role_permissions_names.append(permission["name"]) - # Get predefined 
premissions to entity access expressions from constants + # Get predefined permissions to entity access expressions from constants expressions_dict = CONSTANTS.EntityFilterExpressionList.PERMISSION_TO_ACCESS_MAP permission_names = expressions_dict.keys() diff --git a/plugins/module_utils/prism/idempotence_identifiers.py b/plugins/module_utils/prism/idempotence_identifiers.py index f8e5be4ca..ed52b1928 100644 --- a/plugins/module_utils/prism/idempotence_identifiers.py +++ b/plugins/module_utils/prism/idempotence_identifiers.py @@ -9,10 +9,10 @@ from .prism import Prism -class IdempotenceIdenitifiers(Prism): +class IdempotenceIdentifiers(Prism): def __init__(self, module): resource_type = "/idempotence_identifiers" - super(IdempotenceIdenitifiers, self).__init__( + super(IdempotenceIdentifiers, self).__init__( module, resource_type=resource_type ) diff --git a/plugins/module_utils/prism/projects_internal.py b/plugins/module_utils/prism/projects_internal.py index a086e5864..bf1806451 100644 --- a/plugins/module_utils/prism/projects_internal.py +++ b/plugins/module_utils/prism/projects_internal.py @@ -8,7 +8,7 @@ from .accounts import Account, get_account_uuid from .acps import ACP from .clusters import Cluster -from .idempotence_identifiers import IdempotenceIdenitifiers +from .idempotence_identifiers import IdempotenceIdentifiers from .prism import Prism from .roles import get_role_uuid from .subnets import Subnet, get_subnet_uuid @@ -133,9 +133,9 @@ def _build_spec_default_subnet(self, payload, subnet_ref): if err: return None, err - payload["spec"]["project_detail"]["resources"][ - "default_subnet_reference" - ] = Subnet.build_subnet_reference_spec(uuid) + payload["spec"]["project_detail"]["resources"]["default_subnet_reference"] = ( + Subnet.build_subnet_reference_spec(uuid) + ) return payload, None def _build_spec_subnets(self, payload, subnet_ref_list): @@ -193,7 +193,7 @@ def _build_spec_user_and_user_groups_list(self, payload, role_mappings): ): 
new_uuids_required += 1 - ii = IdempotenceIdenitifiers(self.module) + ii = IdempotenceIdentifiers(self.module) # get uuids for user groups new_uuid_list = ii.get_idempotent_uuids(new_uuids_required) @@ -393,13 +393,11 @@ def _build_spec_role_mappings(self, payload, role_mappings): acp["acp"]["resources"]["user_reference_list"] = role_user_groups_map[ acp["acp"]["resources"]["role_reference"]["uuid"] ]["users"] - acp["acp"]["resources"][ - "user_group_reference_list" - ] = role_user_groups_map[ - acp["acp"]["resources"]["role_reference"]["uuid"] - ][ - "user_groups" - ] + acp["acp"]["resources"]["user_group_reference_list"] = ( + role_user_groups_map[ + acp["acp"]["resources"]["role_reference"]["uuid"] + ]["user_groups"] + ) # pop the role uuid entry once used for acp update role_user_groups_map.pop( diff --git a/plugins/module_utils/prism/protection_rules.py b/plugins/module_utils/prism/protection_rules.py index ed4bd751a..0a773d4dd 100644 --- a/plugins/module_utils/prism/protection_rules.py +++ b/plugins/module_utils/prism/protection_rules.py @@ -110,7 +110,7 @@ def _build_spec_schedules(self, payload, schedules): ): return ( None, - "rpo, rpo_unit, snapshot_type and atleast one policy are required fields for aysynchronous snapshot schedule", + "rpo, rpo_unit, snapshot_type and atleast one policy are required fields for asynchronous snapshot schedule", ) spec["recovery_point_objective_secs"], err = convert_to_secs( diff --git a/plugins/modules/ntnx_acps.py b/plugins/modules/ntnx_acps.py index ddc22aee9..85dc2ee65 100644 --- a/plugins/modules/ntnx_acps.py +++ b/plugins/modules/ntnx_acps.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_acps -short_description: acp module which suports acp Create, update and delete operations +short_description: acp module which supports acp Create, update and delete operations version_added: 1.4.0 description: 'Create, Update, Delete acp' options: diff --git a/plugins/modules/ntnx_categories.py 
b/plugins/modules/ntnx_categories.py index a2855894e..a094b4fe2 100644 --- a/plugins/modules/ntnx_categories.py +++ b/plugins/modules/ntnx_categories.py @@ -16,7 +16,7 @@ options: remove_values: description: - - it indicates to remove all values of the specfied category + - it indicates to remove all values of the specified category - This attribute can be only used with C(state) is absent - This attribute is mutually exclusive with C(values) when state is absent type: bool @@ -202,7 +202,7 @@ def create_categories(module, result): if value not in category_key_values: category_values_specs.append(_category_value.get_value_spec(value)) - # indempotency check + # idempotency check if not category_values_specs and ( category_key_exists and (category_key == category_key_spec) ): diff --git a/plugins/modules/ntnx_clusters_info.py b/plugins/modules/ntnx_clusters_info.py index 33b100eb3..5e1ecf6aa 100644 --- a/plugins/modules/ntnx_clusters_info.py +++ b/plugins/modules/ntnx_clusters_info.py @@ -32,7 +32,7 @@ - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" - - name: List clusterss + - name: List clusters ntnx_clusters_info: nutanix_host: "{{ ip }}" nutanix_username: "{{ username }}" diff --git a/plugins/modules/ntnx_floating_ips.py b/plugins/modules/ntnx_floating_ips.py index 3986b73e0..c858adcac 100644 --- a/plugins/modules/ntnx_floating_ips.py +++ b/plugins/modules/ntnx_floating_ips.py @@ -82,7 +82,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" external_subnet: - uuid: "{{external_subnet.subnet_uuiid}}" + uuid: "{{external_subnet.subnet_uuid}}" - name: create Floating IP with vpc Name with external subnet uuid ntnx_floating_ips: @@ -92,7 +92,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" external_subnet: - uuid: "{{external_subnet.subnet_uuiid}}" + uuid: "{{external_subnet.subnet_uuid}}" vpc: name: "{{vpc.vpc_name}}" private_ip: "{{private_ip}}" diff --git a/plugins/modules/ntnx_floating_ips_info.py 
b/plugins/modules/ntnx_floating_ips_info.py index bb0389e5f..1ac7b0819 100644 --- a/plugins/modules/ntnx_floating_ips_info.py +++ b/plugins/modules/ntnx_floating_ips_info.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_floating_ips_info -short_description: Floting ips info module +short_description: Floating ips info module version_added: 1.0.0 description: 'Get floating_ip info' options: @@ -21,7 +21,7 @@ default: floating_ip fip_uuid: description: - - Floting ip UUID + - Floating ip UUID type: str extends_documentation_fragment: - nutanix.ncp.ntnx_credentials diff --git a/plugins/modules/ntnx_foundation.py b/plugins/modules/ntnx_foundation.py index f5b7a3d15..7a1294b90 100644 --- a/plugins/modules/ntnx_foundation.py +++ b/plugins/modules/ntnx_foundation.py @@ -295,7 +295,7 @@ required: false other_config: description: - - Auxillary lacp configurations. Applicable only for AHV + - Auxiliary lacp configurations. Applicable only for AHV type: list elements: str required: false @@ -549,7 +549,7 @@ required: false other_config: description: - - Auxillary lacp configurations. Applicable only for AHV + - Auxiliary lacp configurations. Applicable only for AHV type: list elements: str required: false diff --git a/plugins/modules/ntnx_foundation_central.py b/plugins/modules/ntnx_foundation_central.py index 7c8acdb76..7e30db41c 100644 --- a/plugins/modules/ntnx_foundation_central.py +++ b/plugins/modules/ntnx_foundation_central.py @@ -363,7 +363,7 @@ """ RETURN = r""" -respone: +response: description: Sample response when only Imaging is done. 
returned: always type: dict diff --git a/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py b/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py index deae2d704..a371db69e 100644 --- a/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py +++ b/plugins/modules/ntnx_foundation_central_imaged_clusters_info.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_foundation_central_imaged_clusters_info -short_description: Nutanix module which returns the imaged clusters within the Foudation Central +short_description: Nutanix module which returns the imaged clusters within the Foundation Central version_added: 1.1.0 description: 'List all the imaged clusters created in Foundation Central.' options: diff --git a/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py b/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py index 4a62ec521..33bdc2e8a 100644 --- a/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py +++ b/plugins/modules/ntnx_foundation_central_imaged_nodes_info.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_foundation_central_imaged_nodes_info -short_description: Nutanix module which returns the imaged nodes within the Foudation Central +short_description: Nutanix module which returns the imaged nodes within the Foundation Central version_added: 1.1.0 description: 'List all the imaged nodes created in Foundation Central.' 
options: diff --git a/plugins/modules/ntnx_foundation_node_network_info.py b/plugins/modules/ntnx_foundation_node_network_info.py index 35f48856e..2feef029a 100644 --- a/plugins/modules/ntnx_foundation_node_network_info.py +++ b/plugins/modules/ntnx_foundation_node_network_info.py @@ -85,7 +85,7 @@ def get_node_network_details(module, result): timeout = module.params.get("timeout") resp = node_network_details.retrieve(nodes, timeout) if not resp: - result["error"] = "Faied to retrieve node network details" + result["error"] = "Failed to retrieve node network details" module.fail_json( msg="Failed to retrieve node network details via foundation", **result ) diff --git a/plugins/modules/ntnx_images.py b/plugins/modules/ntnx_images.py index de98f2f66..0d3d64b50 100644 --- a/plugins/modules/ntnx_images.py +++ b/plugins/modules/ntnx_images.py @@ -67,7 +67,7 @@ type: dict remove_categories: description: - - set this flag to remove dettach all categories attached to image + - set this flag to remove detach all categories attached to image - mutually_exclusive with C(categories) type: bool required: false @@ -196,7 +196,7 @@ - Backup wait: true - - name: dettach all categories from existing image + - name: detach all categories from existing image ntnx_images: state: "present" image_uuid: "00000000-0000-0000-0000-000000000000" diff --git a/plugins/modules/ntnx_karbon_clusters_info.py b/plugins/modules/ntnx_karbon_clusters_info.py index bd4a8b51b..c5c9f14dd 100644 --- a/plugins/modules/ntnx_karbon_clusters_info.py +++ b/plugins/modules/ntnx_karbon_clusters_info.py @@ -139,7 +139,7 @@ returned: if fetch_kubeconfig is true type: str certificate: - description: ssh certifcate + description: ssh certificate returned: if fetch_ssh_credentials is true type: str expiry_time: diff --git a/plugins/modules/ntnx_ndb_authorize_db_server_vms.py b/plugins/modules/ntnx_ndb_authorize_db_server_vms.py index 17b47061a..c9816b4b2 100644 --- 
a/plugins/modules/ntnx_ndb_authorize_db_server_vms.py +++ b/plugins/modules/ntnx_ndb_authorize_db_server_vms.py @@ -74,7 +74,7 @@ """ RETURN = r""" response: - description: An intentful representation of a authorizisation status + description: An intentful representation of an authorization status returned: always type: dict sample: { diff --git a/plugins/modules/ntnx_ndb_database_clones.py b/plugins/modules/ntnx_ndb_database_clones.py index 181894119..a76de8fee 100644 --- a/plugins/modules/ntnx_ndb_database_clones.py +++ b/plugins/modules/ntnx_ndb_database_clones.py @@ -749,13 +749,13 @@ def get_clone_spec(module, result, time_machine_uuid): provision_new_server = ( True if module.params.get("db_vm", {}).get("create_new_server") else False ) - use_athorized_server = not provision_new_server + use_authorized_server = not provision_new_server kwargs = { "time_machine_uuid": time_machine_uuid, "db_clone": True, "provision_new_server": provision_new_server, - "use_authorized_server": use_athorized_server, + "use_authorized_server": use_authorized_server, } spec, err = db_server_vms.get_spec(old_spec=spec, **kwargs) diff --git a/plugins/modules/ntnx_ndb_databases.py b/plugins/modules/ntnx_ndb_databases.py index 1c8560164..496e10ded 100644 --- a/plugins/modules/ntnx_ndb_databases.py +++ b/plugins/modules/ntnx_ndb_databases.py @@ -386,7 +386,7 @@ cluster: description: - cluster where they will be hosted - - this will overide default cluster provided for all vms + - this will override default cluster provided for all vms type: dict suboptions: name: @@ -402,7 +402,7 @@ network_profile: description: - network profile details - - this will overide default network profile provided for all vms + - this will override default network profile provided for all vms type: dict suboptions: name: @@ -418,7 +418,7 @@ compute_profile: description: - compute profile details for the node - - this will overide default
compute profile provided for all vms type: dict suboptions: name: diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index 873e3bee2..cbf5fbec3 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -286,7 +286,7 @@ checkpoint_completion_target: description: - checkpoint completion target - - deafult is 0.5 + - default is 0.5 type: float autovacuum_freeze_max_age: description: @@ -311,7 +311,7 @@ autovacuum_max_workers: description: - autovacuum max workers - - deafult is 3 + - default is 3 type: int autovacuum_vacuum_cost_delay: description: @@ -1081,7 +1081,7 @@ def create_profile(module, result): result["response"] = resp uuid = resp.get("id") - # incase there is process of replication triggered, operation info is recieved + # incase there is process of replication triggered, operation info is received if profile_type == "software" and not uuid: uuid = resp.get("entityId") diff --git a/plugins/modules/ntnx_ndb_register_database.py b/plugins/modules/ntnx_ndb_register_database.py index 0cfec9798..0c8b963c9 100644 --- a/plugins/modules/ntnx_ndb_register_database.py +++ b/plugins/modules/ntnx_ndb_register_database.py @@ -192,7 +192,7 @@ default: "5432" db_name: description: - - intial database that would be added + - initial database that would be added type: str required: true db_password: @@ -314,7 +314,7 @@ register: result -- name: register database from unregistred vm +- name: register database from unregistered vm ntnx_ndb_register_database: wait: true name: "{{db1_name}}" diff --git a/plugins/modules/ntnx_ndb_time_machines_info.py b/plugins/modules/ntnx_ndb_time_machines_info.py index 8fce6fdae..f4a095f0c 100644 --- a/plugins/modules/ntnx_ndb_time_machines_info.py +++ b/plugins/modules/ntnx_ndb_time_machines_info.py @@ -49,7 +49,7 @@ type: bool value: description: - - value correponding to C(value_type) + - value corresponding to C(value_type) type: str value_type:
description: @@ -91,7 +91,7 @@ nutanix_username: "" nutanix_password: "" validate_certs: false - uuid: "" + uuid: "" register: result """ RETURN = r""" diff --git a/plugins/modules/ntnx_ndb_vlans.py b/plugins/modules/ntnx_ndb_vlans.py index c77f65d43..22c85b60b 100644 --- a/plugins/modules/ntnx_ndb_vlans.py +++ b/plugins/modules/ntnx_ndb_vlans.py @@ -28,7 +28,7 @@ type: str vlan_type: description: - wheather the vlan is mannaged or no + whether the vlan is managed or not - update allowed type: str choices: ["DHCP", "Static"] @@ -187,12 +187,12 @@ type: str sample: "Static" managed: - description: mannaged or unmannged vlan + description: managed or unmanaged vlan returned: always type: bool propertiesMap: - description: confiuration of static vlan + description: configuration of static vlan type: dict returned: always sample: @@ -232,7 +232,7 @@ ] properties: - description: list of confiuration of static vlan + description: list of configuration of static vlan type: list returned: always sample: diff --git a/plugins/modules/ntnx_pbrs.py b/plugins/modules/ntnx_pbrs.py index 497d0ac20..c4e8feeb4 100644 --- a/plugins/modules/ntnx_pbrs.py +++ b/plugins/modules/ntnx_pbrs.py @@ -51,7 +51,7 @@ type: bool network: description: - - Traffic from specfic network address + - Traffic from specific network address - Mutually exclusive with C(any) and C(external) type: dict suboptions: @@ -77,7 +77,7 @@ type: bool network: description: - - Traffic to specfic network address + - Traffic to specific network address - Mutually exclusive with C(any) and C(external) type: dict suboptions: @@ -93,7 +93,7 @@ suboptions: any: description: - - Any protcol number + - Any protocol number - Mutually exclusive with C(tcp) and C(udp) and C(number) and C(icmp) type: bool tcp: diff --git a/plugins/modules/ntnx_projects.py b/plugins/modules/ntnx_projects.py index 935917f8a..e2de402b9 100644 --- a/plugins/modules/ntnx_projects.py +++ b/plugins/modules/ntnx_projects.py @@ -357,7 +357,7 @@ from
..module_utils.base_module import BaseModule # noqa: E402 from ..module_utils.prism.idempotence_identifiers import ( # noqa: E402 - IdempotenceIdenitifiers, + IdempotenceIdentifiers, ) from ..module_utils.prism.projects import Project # noqa: E402 from ..module_utils.prism.projects_internal import ProjectsInternal # noqa: E402 @@ -485,7 +485,7 @@ def create_project(module, result): if module.params.get("role_mappings"): # generate new uuid for project - ii = IdempotenceIdenitifiers(module) + ii = IdempotenceIdentifiers(module) uuids = ii.get_idempotent_uuids() projects = ProjectsInternal(module, uuid=uuids[0]) diff --git a/plugins/modules/ntnx_recovery_plan_jobs.py b/plugins/modules/ntnx_recovery_plan_jobs.py index a1acfb586..a115d39b5 100644 --- a/plugins/modules/ntnx_recovery_plan_jobs.py +++ b/plugins/modules/ntnx_recovery_plan_jobs.py @@ -76,11 +76,11 @@ Type of action performed by the Recovery Plan Job. VALIDATE - Performs the validation of the Recovery Plan. The validation includes checks for the presence of entities, networks, categories etc. referenced in the Recovery - Plan. MIGRATE - VM would be powered off on the sourece before migrating it + Plan. MIGRATE - VM would be powered off on the source before migrating it to the recovery Availability Zone. FAILOVER - Restore the entity from the recovery points on the recovery Availability Zone. TEST_FAILOVER - Same as FAILOVER but on a test network. LIVE_MIGRATE - Migrate without powering - off the VM. CLEANUP - for cleaning entities created usnig test failover + off the VM. CLEANUP - for cleaning entities created using test failover type: str required: true choices: @@ -463,7 +463,7 @@ def get_module_spec(): def get_recovery_plan_job_uuid(module, task_uuid): """ This function extracts recovery plan job uuid from task status. - It polls for 10 mins untill the recovery plan job uuid comes up in task response. + It polls for 10 mins until the recovery plan job uuid comes up in task response. 
""" task = Task(module) timeout = time.time() + 600 diff --git a/plugins/modules/ntnx_roles.py b/plugins/modules/ntnx_roles.py index 72315cae0..b400d425a 100644 --- a/plugins/modules/ntnx_roles.py +++ b/plugins/modules/ntnx_roles.py @@ -80,7 +80,7 @@ name: test-ansible-role-1 desc: test-ansible-role-1-desc permissions: - - name: "" + - name: "" - uuid: "" - uuid: "" wait: true diff --git a/plugins/modules/ntnx_security_rules.py b/plugins/modules/ntnx_security_rules.py index 6787af1e9..84f78faeb 100644 --- a/plugins/modules/ntnx_security_rules.py +++ b/plugins/modules/ntnx_security_rules.py @@ -9,7 +9,7 @@ DOCUMENTATION = r""" module: ntnx_security_rules -short_description: security_rule module which suports security_rule CRUD operations +short_description: security_rule module which supports security_rule CRUD operations version_added: 1.3.0 description: 'Create, Update, Delete security_rule' options: diff --git a/plugins/modules/ntnx_service_groups.py b/plugins/modules/ntnx_service_groups.py index e932e9da5..532608f24 100644 --- a/plugins/modules/ntnx_service_groups.py +++ b/plugins/modules/ntnx_service_groups.py @@ -10,7 +10,7 @@ DOCUMENTATION = r""" --- module: ntnx_service_groups -short_description: service_groups module which suports service_groups CRUD operations +short_description: service_groups module which supports service_groups CRUD operations version_added: 1.4.0 description: 'Create, Update, Delete service_group' options: diff --git a/plugins/modules/ntnx_user_groups.py b/plugins/modules/ntnx_user_groups.py index 641f4a2dd..f549eca37 100644 --- a/plugins/modules/ntnx_user_groups.py +++ b/plugins/modules/ntnx_user_groups.py @@ -46,7 +46,7 @@ type: dict remove_categories: description: - - set this flag to remove dettach all categories attached to user_group + - set this flag to remove detach all categories attached to user_group - mutually_exclusive with C(categories) type: bool required: false diff --git a/plugins/modules/ntnx_user_groups_info.py 
b/plugins/modules/ntnx_user_groups_info.py index 0c0875e31..a4149189e 100644 --- a/plugins/modules/ntnx_user_groups_info.py +++ b/plugins/modules/ntnx_user_groups_info.py @@ -154,7 +154,7 @@ "name": "qanucalm", "uuid": "00000000-0000-0000-0000-000000000000" }, - "distinguished_name": "" + "distinguished_name": "" }, "display_name": "name1", "projects_reference_list": [], diff --git a/plugins/modules/ntnx_users.py b/plugins/modules/ntnx_users.py index 90708d627..05915ece9 100644 --- a/plugins/modules/ntnx_users.py +++ b/plugins/modules/ntnx_users.py @@ -46,7 +46,7 @@ type: dict remove_categories: description: - - set this flag to remove dettach all categories attached to user + - set this flag to remove detach all categories attached to user - mutually_exclusive with C(categories) type: bool required: false diff --git a/scripts/codegen.py b/scripts/codegen.py index c182b97f1..747af2d9c 100644 --- a/scripts/codegen.py +++ b/scripts/codegen.py @@ -14,7 +14,7 @@ DOCUMENTATION = r""" --- module: ntnx_MNAME -short_description: MNAME module which suports INAME CRUD operations +short_description: MNAME module which supports INAME CRUD operations version_added: 1.0.0 description: 'Create, Update, Delete MNAME' options: @@ -192,7 +192,7 @@ def __init__(self, module): super(CNAME, self).__init__(module, resource_type=resource_type) self.build_spec_methods = { # Step 2. 
This is a Map of - # ansible attirbute and corresponding API spec generation method + # ansible attribute and corresponding API spec generation method # Example: method name should start with _build_spec_ # name: _build_spec_name } From d25b5a782c70abe353c27e25f9c8784e57fa80a3 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Sun, 29 Sep 2024 12:02:56 +0300 Subject: [PATCH 3/9] Adding additional spelling corrections --- CHANGELOG.rst | 2 +- plugins/inventory/ntnx_prism_vm_inventory.py | 2 +- plugins/module_utils/foundation/image_nodes.py | 2 +- plugins/module_utils/prism/protection_rules.py | 2 +- plugins/modules/ntnx_foundation.py | 18 +++++++++--------- plugins/modules/ntnx_image_placement_policy.py | 4 ++-- plugins/modules/ntnx_images_info.py | 2 +- .../modules/ntnx_ndb_database_clone_refresh.py | 2 +- plugins/modules/ntnx_ndb_database_clones.py | 2 +- plugins/modules/ntnx_ndb_database_snapshots.py | 2 +- plugins/modules/ntnx_ndb_databases_info.py | 2 +- plugins/modules/ntnx_ndb_db_server_vms.py | 2 +- plugins/modules/ntnx_ndb_profiles.py | 4 ++-- plugins/modules/ntnx_ndb_profiles_info.py | 2 +- plugins/modules/ntnx_ndb_register_database.py | 2 +- .../modules/ntnx_ndb_register_db_server_vm.py | 2 +- plugins/modules/ntnx_recovery_plans.py | 2 +- plugins/modules/ntnx_static_routes.py | 2 +- plugins/modules/ntnx_users.py | 2 +- plugins/modules/ntnx_vms.py | 2 +- tests/unit/plugins/module_utils/test_entity.py | 2 +- 21 files changed, 31 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 58e7d6113..2ce41f80e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -274,7 +274,7 @@ Bugfixes - Bug/cluster UUID issue68 [\#72](https://github.com/nutanix/nutanix.ansible/pull/72) - Client SDK with inventory [\#45](https://github.com/nutanix/nutanix.ansible/pull/45) - Creating a VM based on a disk_image without specifying the size_gb -- Fix error messages for get_uuid() reponse [\#47](https://github.com/nutanix/nutanix.ansible/pull/47) +- Fix error 
messages for get_uuid() response [\#47](https://github.com/nutanix/nutanix.ansible/pull/47) - Fix/integ [\#96](https://github.com/nutanix/nutanix.ansible/pull/96) - Sanity and python fix [\#46](https://github.com/nutanix/nutanix.ansible/pull/46) - Task/fix failing sanity [\#117](https://github.com/nutanix/nutanix.ansible/pull/117) diff --git a/plugins/inventory/ntnx_prism_vm_inventory.py b/plugins/inventory/ntnx_prism_vm_inventory.py index dd6d7fe9b..4a085861f 100644 --- a/plugins/inventory/ntnx_prism_vm_inventory.py +++ b/plugins/inventory/ntnx_prism_vm_inventory.py @@ -93,7 +93,7 @@ def jsonify(self, data): class InventoryModule(BaseInventoryPlugin, Constructable): - """Nutanix VM dynamic invetory module for ansible""" + """Nutanix VM dynamic inventory module for ansible""" NAME = "nutanix.ncp.ntnx_prism_vm_inventory" diff --git a/plugins/module_utils/foundation/image_nodes.py b/plugins/module_utils/foundation/image_nodes.py index 4e11760fb..43fb40ba3 100644 --- a/plugins/module_utils/foundation/image_nodes.py +++ b/plugins/module_utils/foundation/image_nodes.py @@ -404,7 +404,7 @@ def _get_default_node_spec(self, node): "ucsm_node_serial": None, "ucsm_managed_mode": None, "ucsm_params": None, - "exlude_boot_serial": False, + "exclude_boot_serial": False, "mitigate_low_boot_space": False, "bond_uplinks": [], "vswitches": [], diff --git a/plugins/module_utils/prism/protection_rules.py b/plugins/module_utils/prism/protection_rules.py index 0a773d4dd..206d85c6a 100644 --- a/plugins/module_utils/prism/protection_rules.py +++ b/plugins/module_utils/prism/protection_rules.py @@ -110,7 +110,7 @@ def _build_spec_schedules(self, payload, schedules): ): return ( None, - "rpo, rpo_unit, snapshot_type and atleast one policy are required fields for asynchronous snapshot schedule", + "rpo, rpo_unit, snapshot_type and at least one policy are required fields for asynchronous snapshot schedule", ) spec["recovery_point_objective_secs"], err = convert_to_secs( diff --git 
a/plugins/modules/ntnx_foundation.py b/plugins/modules/ntnx_foundation.py index 7a1294b90..ee1aa9d9f 100644 --- a/plugins/modules/ntnx_foundation.py +++ b/plugins/modules/ntnx_foundation.py @@ -207,7 +207,7 @@ required: false bond_mode: description: - - bonde mode, "dynamic" if using LACP, "static" for LAG + - bond mode, "dynamic" if using LACP, "static" for LAG type: str choices: - dynamic @@ -247,7 +247,7 @@ - UCSM node serial type: bool required: false - exlude_boot_serial: + exclude_boot_serial: description: - serial of boot device to be excluded, used by NX G6 platforms type: bool @@ -327,7 +327,7 @@ required: false discovery_mode: description: - - discover and use existing network informatio pulled from internal info apis + - discover and use existing network information pulled from internal info apis - mutually exclusive with manual_mode - can override certain fields, which are pulled during discovery type: dict @@ -471,7 +471,7 @@ required: false bond_mode: description: - - bonde mode, "dynamic" if using LACP, "static" for LAG + - bond mode, "dynamic" if using LACP, "static" for LAG type: str choices: - dynamic @@ -501,7 +501,7 @@ - UCSM Managed mode type: str required: false - exlude_boot_serial: + exclude_boot_serial: description: - serial of boot device to be excluded, used by NX G6 platforms type: bool @@ -792,12 +792,12 @@ required: false default_ipmi_user: description: - - default ipmi username, required either at node leve or here incase of ipmi based imaging + - default ipmi username, required either at node level or here incase of ipmi based imaging type: str required: false default_ipmi_password: description: - - default ipmi password, required either at node leve or here incase of ipmi based imaging + - default ipmi password, required either at node level or here incase of ipmi based imaging type: str required: false skip_hypervisor: @@ -1054,7 +1054,7 @@ def get_module_spec(): ucsm_node_serial=dict(type="str", required=False), 
image_successful=dict(type="bool", required=False), ucsm_managed_mode=dict(type="str", required=False), - exlude_boot_serial=dict(type="bool", required=False), + exclude_boot_serial=dict(type="bool", required=False), mitigate_low_boot_space=dict(type="bool", required=False), vswitches=dict(type="list", elements="dict", options=vswitches, required=False), ucsm_params=dict(type="dict", options=ucsm_params, required=False), @@ -1094,7 +1094,7 @@ def get_module_spec(): rdma_passthrough=dict(type="bool", required=False), ucsm_node_serial=dict(type="str", required=False), ucsm_managed_mode=dict(type="str", required=False), - exlude_boot_serial=dict(type="bool", required=False), + exclude_boot_serial=dict(type="bool", required=False), mitigate_low_boot_space=dict(type="bool", required=False), bond_uplinks=dict(type="list", elements="str", required=False), vswitches=dict(type="list", elements="dict", options=vswitches, required=False), diff --git a/plugins/modules/ntnx_image_placement_policy.py b/plugins/modules/ntnx_image_placement_policy.py index 471d3c2e4..ae276f4c5 100644 --- a/plugins/modules/ntnx_image_placement_policy.py +++ b/plugins/modules/ntnx_image_placement_policy.py @@ -84,7 +84,7 @@ description: - When set will remove all categories attached to the policy. 
- Mutually exclusive ith C(categories) - - It doesnot remove C(image_categories) or C(cluster_categories) + - It does not remove C(image_categories) or C(cluster_categories) required: false type: bool default: false @@ -146,7 +146,7 @@ nutanix_username: "{{ username }}" nutanix_password: "{{ password }}" validate_certs: False - name: "test_policy_2-uodated" + name: "test_policy_2-updated" desc: "test_policy_2_desc-updated" placement_type: hard categories: diff --git a/plugins/modules/ntnx_images_info.py b/plugins/modules/ntnx_images_info.py index 96fbd78f2..bd9ac57d1 100644 --- a/plugins/modules/ntnx_images_info.py +++ b/plugins/modules/ntnx_images_info.py @@ -136,7 +136,7 @@ "resources": { "architecture": "X86_64", "image_type": "DISK_IMAGE", - "source_uri": "" + "source_uri": "" } }, "status": { diff --git a/plugins/modules/ntnx_ndb_database_clone_refresh.py b/plugins/modules/ntnx_ndb_database_clone_refresh.py index e1e488b08..d7bbf67a7 100644 --- a/plugins/modules/ntnx_ndb_database_clone_refresh.py +++ b/plugins/modules/ntnx_ndb_database_clone_refresh.py @@ -11,7 +11,7 @@ module: ntnx_ndb_database_clone_refresh short_description: module for database clone refresh. version_added: 1.8.0 -description: moudle for refreshing database clone to certain point in time or snapshot. +description: module for refreshing database clone to certain point in time or snapshot. 
options: uuid: description: diff --git a/plugins/modules/ntnx_ndb_database_clones.py b/plugins/modules/ntnx_ndb_database_clones.py index a76de8fee..56276b9a7 100644 --- a/plugins/modules/ntnx_ndb_database_clones.py +++ b/plugins/modules/ntnx_ndb_database_clones.py @@ -127,7 +127,7 @@ use_authorized_server: description: - - conifgure authorized database server VM for hosting database clone + - configure authorized database server VM for hosting database clone type: dict suboptions: name: diff --git a/plugins/modules/ntnx_ndb_database_snapshots.py b/plugins/modules/ntnx_ndb_database_snapshots.py index 751ae1f8e..2107ade08 100644 --- a/plugins/modules/ntnx_ndb_database_snapshots.py +++ b/plugins/modules/ntnx_ndb_database_snapshots.py @@ -25,7 +25,7 @@ type: str name: description: - - name of snaphsot. + - name of snapshot. - required for create - update is allowed type: str diff --git a/plugins/modules/ntnx_ndb_databases_info.py b/plugins/modules/ntnx_ndb_databases_info.py index a9e5430b7..c3644ed08 100644 --- a/plugins/modules/ntnx_ndb_databases_info.py +++ b/plugins/modules/ntnx_ndb_databases_info.py @@ -35,7 +35,7 @@ type: bool load_dbserver_cluster: description: - - load db serverv cluster in response + - load db server cluster in response type: bool order_by_dbserver_cluster: description: diff --git a/plugins/modules/ntnx_ndb_db_server_vms.py b/plugins/modules/ntnx_ndb_db_server_vms.py index adaf14702..2bdec4e48 100644 --- a/plugins/modules/ntnx_ndb_db_server_vms.py +++ b/plugins/modules/ntnx_ndb_db_server_vms.py @@ -111,7 +111,7 @@ type: str version_uuid: description: - - version UUID for softwware profile + - version UUID for software profile - if not given then latest version will be used type: str time_machine: diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index cbf5fbec3..940b14740 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -279,7 +279,7 @@ type: int autovacuum: 
description: - - on/off autovaccum + - on/off autovacuum - default is on type: str choices: ["on", "off"] @@ -305,7 +305,7 @@ type: float autovacuum_work_mem: description: - - autovacum work memory in KB + - autovacuum work memory in KB - default is -1 type: int autovacuum_max_workers: diff --git a/plugins/modules/ntnx_ndb_profiles_info.py b/plugins/modules/ntnx_ndb_profiles_info.py index f3a354dc3..ffed1b3b8 100644 --- a/plugins/modules/ntnx_ndb_profiles_info.py +++ b/plugins/modules/ntnx_ndb_profiles_info.py @@ -24,7 +24,7 @@ type: str version_id: description: - - vrsion uuid + - version uuid type: str latest_version: description: diff --git a/plugins/modules/ntnx_ndb_register_database.py b/plugins/modules/ntnx_ndb_register_database.py index 0c8b963c9..bb69f92f2 100644 --- a/plugins/modules/ntnx_ndb_register_database.py +++ b/plugins/modules/ntnx_ndb_register_database.py @@ -182,7 +182,7 @@ default: true postgres: description: - - potgres related configuration + - postgres related configuration type: dict suboptions: listener_port: diff --git a/plugins/modules/ntnx_ndb_register_db_server_vm.py b/plugins/modules/ntnx_ndb_register_db_server_vm.py index 3c5a9c240..ad49e4ebb 100644 --- a/plugins/modules/ntnx_ndb_register_db_server_vm.py +++ b/plugins/modules/ntnx_ndb_register_db_server_vm.py @@ -45,7 +45,7 @@ type: str postgres: description: - - potgres related configuration + - postgres related configuration type: dict suboptions: listener_port: diff --git a/plugins/modules/ntnx_recovery_plans.py b/plugins/modules/ntnx_recovery_plans.py index 6ef6678e3..9748b53c4 100644 --- a/plugins/modules/ntnx_recovery_plans.py +++ b/plugins/modules/ntnx_recovery_plans.py @@ -567,7 +567,7 @@ { "ip_config_list": [ { - "ip_address": "cutom_ip_1" + "ip_address": "custom_ip_1" } ], "vm_reference": { diff --git a/plugins/modules/ntnx_static_routes.py b/plugins/modules/ntnx_static_routes.py index d77e3c2da..ade2ff7a4 100644 --- a/plugins/modules/ntnx_static_routes.py +++ 
b/plugins/modules/ntnx_static_routes.py @@ -44,7 +44,7 @@ destination: description: - destination prefix eg. 10.2.3.0/24 - - for defaut static route give 0.0.0.0/0 + - for default static route give 0.0.0.0/0 required: true type: str next_hop: diff --git a/plugins/modules/ntnx_users.py b/plugins/modules/ntnx_users.py index 05915ece9..79c7a3b57 100644 --- a/plugins/modules/ntnx_users.py +++ b/plugins/modules/ntnx_users.py @@ -62,7 +62,7 @@ description: The UserPrincipalName of the user from the directory service. project: type: dict - description: project that belogs to + description: project that belongs to suboptions: name: type: str diff --git a/plugins/modules/ntnx_vms.py b/plugins/modules/ntnx_vms.py index 604135925..baf3848a2 100644 --- a/plugins/modules/ntnx_vms.py +++ b/plugins/modules/ntnx_vms.py @@ -100,7 +100,7 @@ suboptions: name: description: - - Storage containter Name + - Storage container Name - Mutually exclusive with C(uuid) type: str uuid: diff --git a/tests/unit/plugins/module_utils/test_entity.py b/tests/unit/plugins/module_utils/test_entity.py index 419263a2b..3529442b3 100644 --- a/tests/unit/plugins/module_utils/test_entity.py +++ b/tests/unit/plugins/module_utils/test_entity.py @@ -132,7 +132,7 @@ def test_negative_list_action(self): self.assertEqual(result["request"], req) self.assertEqual(entity.headers.get("Authorization"), None) - def test_raed_action(self): + def test_read_action(self): uuid = "test_uuid" req = { "method": "GET", From 4f60f3fef9ad531f6f9d226954f9f7cd21cc210e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Sj=C3=B6gren?= Date: Sun, 29 Sep 2024 16:53:17 +0200 Subject: [PATCH 4/9] plugins and tests spell checked (#491) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Thomas Sjögren --- plugins/doc_fragments/ntnx_operations.py | 2 +- plugins/module_utils/entity.py | 6 +++--- plugins/module_utils/ndb/db_server_vm.py | 2 +- plugins/module_utils/ndb/time_machines.py | 2 
+- plugins/module_utils/prism/images.py | 2 +- plugins/module_utils/prism/permissions.py | 2 +- .../module_utils/prism/projects_internal.py | 2 +- plugins/module_utils/prism/subnets.py | 2 +- plugins/modules/ntnx_foundation.py | 20 +++++++++---------- .../modules/ntnx_foundation_image_upload.py | 2 +- .../modules/ntnx_karbon_registries_info.py | 2 +- .../modules/ntnx_ndb_database_snapshots.py | 2 +- plugins/modules/ntnx_ndb_databases.py | 2 +- plugins/modules/ntnx_ndb_db_servers_info.py | 2 +- .../modules/ntnx_ndb_maintenance_window.py | 2 +- plugins/modules/ntnx_ndb_profiles.py | 4 ++-- plugins/modules/ntnx_ndb_slas.py | 4 ++-- .../modules/ntnx_ndb_time_machines_info.py | 2 +- plugins/modules/ntnx_ndb_vlans.py | 2 +- plugins/modules/ntnx_recovery_plans.py | 8 ++++---- plugins/modules/ntnx_static_routes.py | 2 +- plugins/modules/ntnx_vms.py | 10 +++++----- .../tasks/crud.yml | 4 ++-- .../tasks/negative_scenarios.yml | 8 ++++---- .../targets/ntnx_ndb_clusters/tasks/CRUD.yml | 6 +++--- .../tasks/all_actions.yml | 2 +- .../ntnx_ndb_software_profiles/tasks/crud.yml | 2 +- .../targets/ntnx_ova/tasks/create_ova.yml | 4 ++-- .../nutanix_vms/tasks/negtaive_vm_update.yml | 4 ++-- 29 files changed, 57 insertions(+), 57 deletions(-) diff --git a/plugins/doc_fragments/ntnx_operations.py b/plugins/doc_fragments/ntnx_operations.py index 215484b4f..0b4af7c28 100644 --- a/plugins/doc_fragments/ntnx_operations.py +++ b/plugins/doc_fragments/ntnx_operations.py @@ -16,7 +16,7 @@ class ModuleDocFragment(object): state: description: - Specify state - - If C(state) is set to C(present) then the operation will be create the item + - If C(state) is set to C(present) then the operation will be create the item - >- If C(state) is set to C(absent) and if the item exists, then item is removed. 
diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index a2d8d97cd..63a79c7f9 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -221,7 +221,7 @@ def list( return resp - # "params" can be used to override module.params to create spec by other modules backened + # "params" can be used to override module.params to create spec by other modules backends def get_spec(self, old_spec=None, params=None, **kwargs): spec = copy.deepcopy(old_spec) or self._get_default_spec() @@ -344,7 +344,7 @@ def _build_url_with_query(self, url, query): def _fetch_url( self, url, method, data=None, raise_error=True, no_response=False, timeout=30 ): - # only jsonify if content-type supports, added to avoid incase of form-url-encoded type data + # only jsonify if content-type supports, added to avoid in case of form-url-encodeded type data if self.headers["Content-Type"] == "application/json" and data is not None: data = self.module.jsonify(data) @@ -365,7 +365,7 @@ def _fetch_url( # buffer size with ref. to max read size of http.client.HTTPResponse.read() definition buffer_size = 65536 - # From ansible-core>=2.13, incase of http error, urllib.HTTPError object is returned in resp + # From ansible-core>=2.13, in case of http error, urllib.HTTPError object is returned in resp # as per the docs of ansible we need to use body in that case. 
if not resp or status_code >= 400: # get body containing error diff --git a/plugins/module_utils/ndb/db_server_vm.py b/plugins/module_utils/ndb/db_server_vm.py index 670a2f4da..a46ba1f4d 100644 --- a/plugins/module_utils/ndb/db_server_vm.py +++ b/plugins/module_utils/ndb/db_server_vm.py @@ -631,7 +631,7 @@ def build_spec_vms(self, payload, vms, **kwargs): # noqa: C901 cluster = Cluster(self.module) clusters = cluster.get_all_clusters_name_uuid_map() - # spec with default vlaues + # spec with default values spec = { "properties": [], "vmName": "", diff --git a/plugins/module_utils/ndb/time_machines.py b/plugins/module_utils/ndb/time_machines.py index 7f78858c7..bc1e566a9 100644 --- a/plugins/module_utils/ndb/time_machines.py +++ b/plugins/module_utils/ndb/time_machines.py @@ -170,7 +170,7 @@ def get_spec(self, old_spec, params=None, **kwargs): if err: return None, err - # set destination clusters incase of HA instance + # set destination clusters in case of HA instance if params.get("clusters"): cluster_uuids = [] diff --git a/plugins/module_utils/prism/images.py b/plugins/module_utils/prism/images.py index 43c0bff4a..169ff24e4 100644 --- a/plugins/module_utils/prism/images.py +++ b/plugins/module_utils/prism/images.py @@ -20,7 +20,7 @@ def __init__(self, module, upload_image=False): "Accept": "application/json", } - # add checksum headers if given incase of local upload + # add checksum headers if given in case of local upload checksum = module.params.get("checksum") if checksum and module.params.get("source_path"): additional_headers["X-Nutanix-Checksum-Type"] = checksum[ diff --git a/plugins/module_utils/prism/permissions.py b/plugins/module_utils/prism/permissions.py index 32e8326ce..5a701bc8a 100644 --- a/plugins/module_utils/prism/permissions.py +++ b/plugins/module_utils/prism/permissions.py @@ -28,7 +28,7 @@ def get_uuid(self, value, key="name", raise_error=True, no_response=False): if entity["spec"]["name"] == value: return entity["metadata"]["uuid"] - # 
Incase there are more entities to check + # In case there are more entities to check while resp["total_matches"] > resp["length"] + resp["offset"]: filter_spec["length"] = self.entities_limitation filter_spec["offset"] = filter_spec["offset"] + self.entities_limitation diff --git a/plugins/module_utils/prism/projects_internal.py b/plugins/module_utils/prism/projects_internal.py index bf1806451..61f235e8e 100644 --- a/plugins/module_utils/prism/projects_internal.py +++ b/plugins/module_utils/prism/projects_internal.py @@ -365,7 +365,7 @@ def _build_spec_role_mappings(self, payload, role_mappings): _acp = ACP(self.module) # First check existing acps of project w.r.t to role mapping, if UPDATE/DELETE of acp is required - # Incase its a UPDATE acp for role we pop the entry from role_user_groups_map, + # In case its a UPDATE acp for role we pop the entry from role_user_groups_map, # so that we are left with roles for which new acps are to be created. for acp in payload["spec"]["access_control_policy_list"]: diff --git a/plugins/module_utils/prism/subnets.py b/plugins/module_utils/prism/subnets.py index 1eee1e030..d7eac4632 100644 --- a/plugins/module_utils/prism/subnets.py +++ b/plugins/module_utils/prism/subnets.py @@ -159,7 +159,7 @@ def get_subnet_uuid(config, module): name = config.get("name") or config.get("subnet_name") uuid = "" - # incase subnet of particular cluster is needed + # in case subnet of particular cluster is needed if config.get("cluster_uuid"): filter_spec = {"filter": "{0}=={1}".format("name", name)} resp = subnet.list(data=filter_spec) diff --git a/plugins/modules/ntnx_foundation.py b/plugins/modules/ntnx_foundation.py index ee1aa9d9f..803e2f2b2 100644 --- a/plugins/modules/ntnx_foundation.py +++ b/plugins/modules/ntnx_foundation.py @@ -149,13 +149,13 @@ ipmi_password: description: - ipmi password, override default_ipmi_password - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare 
metal nodes type: str required: false ipmi_user: description: - ipmi user, override default_ipmi_user - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare metal nodes type: str required: false ipmi_netmask: @@ -180,7 +180,7 @@ required: false ipv6_address: description: - - ipv6 address, required incase of using cvm for imaging + - ipv6 address, required in case of using cvm for imaging type: str required: false device_hint: @@ -197,7 +197,7 @@ required: false current_network_interface: description: - - current network interface, required incase of using cvm for imaging + - current network interface, required in case of using cvm for imaging type: str required: false rdma_passthrough: @@ -427,12 +427,12 @@ required: false ipv6_address: description: - - ipv6 address, required incase of using cvm for imaging + - ipv6 address, required in case of using cvm for imaging type: str required: false current_network_interface: description: - - current network interface, required incase of using cvm for imaging + - current network interface, required in case of using cvm for imaging type: str required: false cluster_id: @@ -443,13 +443,13 @@ ipmi_password: description: - ipmi password, override default_ipmi_password - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare metal nodes type: str required: false ipmi_user: description: - ipmi user, override default_ipmi_user - - mandatory incase of ipmi based imaging and bare metal nodes + - mandatory in case of ipmi based imaging and bare metal nodes type: str required: false device_hint: @@ -792,12 +792,12 @@ required: false default_ipmi_user: description: - - default ipmi username, required either at node level or here incase of ipmi based imaging + - default ipmi username, required either at node level or here in case of ipmi based imaging type: str required: false default_ipmi_password: description: - - 
default ipmi password, required either at node level or here incase of ipmi based imaging + - default ipmi password, required either at node level or here in case of ipmi based imaging type: str required: false skip_hypervisor: diff --git a/plugins/modules/ntnx_foundation_image_upload.py b/plugins/modules/ntnx_foundation_image_upload.py index 719f66eaa..bd77547ed 100644 --- a/plugins/modules/ntnx_foundation_image_upload.py +++ b/plugins/modules/ntnx_foundation_image_upload.py @@ -17,7 +17,7 @@ source: description: - local full path of installer file where the ansible playbook runs - - mandatory incase of upload i.e. state=present + - mandatory in case of upload i.e. state=present type: str required: false filename: diff --git a/plugins/modules/ntnx_karbon_registries_info.py b/plugins/modules/ntnx_karbon_registries_info.py index 6f11e446d..ae280f41f 100644 --- a/plugins/modules/ntnx_karbon_registries_info.py +++ b/plugins/modules/ntnx_karbon_registries_info.py @@ -94,7 +94,7 @@ def get_registries(module, result): # If there is no registries, # response will be empty list causing error in entity class - # so do status code checks here incase of other failures. + # so do status code checks here in case of other failures. 
# During failures response is of type dict else its list resp = registry.read(raise_error=False) if isinstance(resp, dict) and resp.get("code") >= 300: diff --git a/plugins/modules/ntnx_ndb_database_snapshots.py b/plugins/modules/ntnx_ndb_database_snapshots.py index 2107ade08..210e64a8e 100644 --- a/plugins/modules/ntnx_ndb_database_snapshots.py +++ b/plugins/modules/ntnx_ndb_database_snapshots.py @@ -31,7 +31,7 @@ type: str clusters: description: - - list of clusters incase snapshots needs to be replicated to secondary clusters + - list of clusters in case snapshots needs to be replicated to secondary clusters - if secondary clusters of time machines are mentioned, then this module won't track the replication process - clusters changes are not considered during update, for replication use ntnx_ndb_replicate_database_snapshots type: list diff --git a/plugins/modules/ntnx_ndb_databases.py b/plugins/modules/ntnx_ndb_databases.py index 496e10ded..733f1b6a9 100644 --- a/plugins/modules/ntnx_ndb_databases.py +++ b/plugins/modules/ntnx_ndb_databases.py @@ -331,7 +331,7 @@ - allowed for HA instance type: description: - - if its a HA or singe instance + - if its a HA or single instance - mandatory for creation type: str choices: ["single", "ha"] diff --git a/plugins/modules/ntnx_ndb_db_servers_info.py b/plugins/modules/ntnx_ndb_db_servers_info.py index fd18f8f20..91d8057d9 100644 --- a/plugins/modules/ntnx_ndb_db_servers_info.py +++ b/plugins/modules/ntnx_ndb_db_servers_info.py @@ -59,7 +59,7 @@ type: bool value: description: - - vlaue as per C(value_type) + - value as per C(value_type) type: str value_type: description: diff --git a/plugins/modules/ntnx_ndb_maintenance_window.py b/plugins/modules/ntnx_ndb_maintenance_window.py index 87a26080c..12a18e40c 100644 --- a/plugins/modules/ntnx_ndb_maintenance_window.py +++ b/plugins/modules/ntnx_ndb_maintenance_window.py @@ -42,7 +42,7 @@ type: int start_time: description: - - start time of maintenance in formate 'hh:mm:ss' + - 
start time of maintenance in format 'hh:mm:ss' type: str timezone: description: diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index 940b14740..d842cd790 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -311,7 +311,7 @@ autovacuum_max_workers: description: - autovacuum max workers - - deadefaultfult is 3 + - default is 3 type: int autovacuum_vacuum_cost_delay: description: @@ -1081,7 +1081,7 @@ def create_profile(module, result): result["response"] = resp uuid = resp.get("id") - # incase there is process of replication triggered, operation info is received + # in case there is process of replication triggered, operation info is received if profile_type == "software" and not uuid: uuid = resp.get("entityId") diff --git a/plugins/modules/ntnx_ndb_slas.py b/plugins/modules/ntnx_ndb_slas.py index d858bcc6b..72efbc9bb 100644 --- a/plugins/modules/ntnx_ndb_slas.py +++ b/plugins/modules/ntnx_ndb_slas.py @@ -10,9 +10,9 @@ DOCUMENTATION = r""" --- module: ntnx_ndb_slas -short_description: moudle for creating, updating and deleting slas +short_description: module for creating, updating and deleting slas version_added: 1.8.0 -description: moudle for creating, updating and deleting slas +description: module for creating, updating and deleting slas options: name: description: diff --git a/plugins/modules/ntnx_ndb_time_machines_info.py b/plugins/modules/ntnx_ndb_time_machines_info.py index f4a095f0c..952d1c61a 100644 --- a/plugins/modules/ntnx_ndb_time_machines_info.py +++ b/plugins/modules/ntnx_ndb_time_machines_info.py @@ -91,7 +91,7 @@ nutanix_username: "" nutanix_password: "" validate_certs: false - uuid: "" + uuid: "" register: result """ RETURN = r""" diff --git a/plugins/modules/ntnx_ndb_vlans.py b/plugins/modules/ntnx_ndb_vlans.py index 22c85b60b..e020c9f0f 100644 --- a/plugins/modules/ntnx_ndb_vlans.py +++ b/plugins/modules/ntnx_ndb_vlans.py @@ -28,7 +28,7 @@ type: str vlan_type: 
description: - - wether the vlan is managed or no + - whether the vlan is managed or not - update allowed type: str choices: ["DHCP", "Static"] diff --git a/plugins/modules/ntnx_recovery_plans.py b/plugins/modules/ntnx_recovery_plans.py index 9748b53c4..5b5eb9910 100644 --- a/plugins/modules/ntnx_recovery_plans.py +++ b/plugins/modules/ntnx_recovery_plans.py @@ -141,7 +141,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: @@ -189,7 +189,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: @@ -246,7 +246,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: @@ -294,7 +294,7 @@ type: str required: true gateway_ip: - description: gateway ip of subnet incase of IPAM + description: gateway ip of subnet in case of IPAM type: str required: false prefix: diff --git a/plugins/modules/ntnx_static_routes.py b/plugins/modules/ntnx_static_routes.py index ade2ff7a4..9152a933a 100644 --- a/plugins/modules/ntnx_static_routes.py +++ b/plugins/modules/ntnx_static_routes.py @@ -34,7 +34,7 @@ description: - list of static routes to be overridden in vpc. 
- mutually exclusive with C(remove_all_routes) - - required incase remove_all_categories is not given + - required in case remove_all_categories is not given - default static route can be mentioned in this with destination - 0.0.0.0/0 - Only one default static route is allowed required: false diff --git a/plugins/modules/ntnx_vms.py b/plugins/modules/ntnx_vms.py index baf3848a2..d495a6a22 100644 --- a/plugins/modules/ntnx_vms.py +++ b/plugins/modules/ntnx_vms.py @@ -17,14 +17,14 @@ state: description: - Specify state - - If C(state) is set to C(present) then the operation will be create the item + - If C(state) is set to C(present) then the operation will be create the item - >- If C(state) is set to C(absent) and if the item exists, then item is removed. - - If C(state) is set to C(power_on) then the operation will be power on the VM - - If C(state) is set to C(power_off) then the operation will be power off the VM - - If C(state) is set to C(soft_shutdown) then the operation will be soft shutdown the VM - - If C(state) is set to C(hard_poweroff) then the operation will be hard poweroff the VM + - If C(state) is set to C(power_on) then the operation will power on the VM + - If C(state) is set to C(power_off) then the operation will power off the VM + - If C(state) is set to C(soft_shutdown) then the operation will shutdown the VM + - If C(state) is set to C(hard_poweroff) then the operation will hard poweroff the VM choices: - present - absent diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml index 34243e48c..c795d674a 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml @@ -345,7 +345,7 @@ memory_gb: 8 # for etcd min 8 disk_gb: 120 add_labels: - propert.-+]y5: "string" + property.-+]y5: "string" propert5: "string" property4: "string+-.3-@" register: 
result @@ -373,7 +373,7 @@ property1: "test-property1" property2: "test-property2" property3: "test-property3" - propert.-+]y5: "string" + property.-+]y5: "string" register: result ignore_errors: true diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml index 58a471a17..f0c9477e0 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml @@ -47,7 +47,7 @@ - result.error == "cpu cannot be less then 4" - result.msg == "Failed generating create cluster spec" fail_msg: " Fail: cluster created with cpu less than minimum" - success_msg: " Pass: Returned as expected" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with memory_gb less than minimum ntnx_karbon_clusters: @@ -90,7 +90,7 @@ - result.error == "memory_gb cannot be less then 8" - result.msg == "Failed generating create cluster spec" fail_msg: " Fail: cluster created with memory_gb size less than minimum" - success_msg: " Pass: Returned as expected" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with wrong num_instances for master nodes ntnx_karbon_clusters: @@ -133,7 +133,7 @@ - result.error == "value of masters.num_instances must be 1 or 2" - result.msg == "Failed generating create cluster spec" fail_msg: " Fail: cluster created with wrong num_instances for master nodes" - success_msg: " Pass: Returned as expected" + success_msg: " Pass: Returned as expected" ############################# - name: create cluster with wrong num_instances for etcd nodes ntnx_karbon_clusters: @@ -176,5 +176,5 @@ - result.error == "value of etcd.num_instances must be 1, 3 or 5" - result.msg == "Failed generating create cluster spec" fail_msg: " Fail: cluster created with wrong 
num_instances for etcd nodes" - success_msg: " Pass: Returned as expected" + success_msg: " Pass: Returned as expected" ############################# diff --git a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml index b3313d363..c87757a3f 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml +++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml @@ -49,7 +49,7 @@ - result.response.networksInfo[0].networkInfo[3].value == "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" - result.response.networksInfo[0].type== "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" fail_msg: "fail: Wring with check mode for registering cluster" - success_msg: "pass: returned as expected" + success_msg: "pass: Returned as expected" - name: Register cluster with prism_vlan ntnx_ndb_clusters: @@ -136,8 +136,8 @@ - result.response.username is defined - result.response.password is defined - result.cluster_uuid is defined - fail_msg: "fail: update cluster credential while check_mode" - success_msg: "pass: returned as expected" + fail_msg: "fail: update cluster credeential while check_mode" + success_msg: "pass: Returned as expected" ################################################################ - name: Negative Scenarios update storage container diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml index eaeea2156..ff5245cbb 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml +++ b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml @@ -369,7 +369,7 @@ that: - result == expected_result fail_msg: "Unable to create restore using pitr timestamp spec" - success_msg: "Spec for database restore using pitr timestamp created successfully" + success_msg: "Spec for database restore using pitr timetsmap created successfully" 
- name: create restore database spec with latest snapshot check_mode: yes diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml index 2879a64f8..22b872387 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml @@ -210,7 +210,7 @@ - result.response.profile.name == "{{profile1_name}}-updated1" - result.response.profile.description == "{{profile1_name}}-desc-updated" - fail_msg: "Fail: Update did not get skipped due to no state changes" + fail_msg: "Fail: Update didn't get skipped due to no state changes" success_msg: "Pass: Update skipped successfully due to no state changes" - name: create software profile version spec diff --git a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml index f9dea0812..96cdc4797 100644 --- a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml +++ b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml @@ -15,8 +15,8 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: "Fail: Unable to create VM with minimum requirements " - success_msg: "Success: VM with minimum requirements created successfully " + fail_msg: 'Fail: Unable to create VM with minimum requirements ' + success_msg: 'Success: VM with minimum requirements created successfully ' ######################################### - name: create_ova_image with check mode ntnx_vms_ova: diff --git a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml index d2c0481d3..18afc583c 100644 --- a/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml +++ b/tests/integration/targets/nutanix_vms/tasks/negtaive_vm_update.yml @@ -240,7 +240,7 @@ fail_msg: " Fail: decreasing the size of the IDE disk" success_msg: 
" Success: returned error as expected " ################ -- name: Update VM by change ths bus type of ide disk +- name: Update VM by change the bus type of ide disk ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" disks: @@ -258,7 +258,7 @@ success_msg: " Success: returned error as expected " fail_msg: " Fail: Update VM by change ths bus type of ide disk successfully " ############ -- name: Update VM by adding IDE disk while vm is on +- name: Update VM by adding IDE disk while vm is on ntnx_vms: vm_uuid: "{{ vm.vm_uuid }}" disks: From d072af10ad6d9a5e27be8be0ad849cb6196fd5bd Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Thu, 26 Sep 2024 15:53:36 +0300 Subject: [PATCH 5/9] Correct multiple spelling errors to improve readability --- tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml | 2 +- .../targets/ntnx_ndb_databases_actions/tasks/all_actions.yml | 2 +- .../targets/ntnx_ndb_software_profiles/tasks/crud.yml | 2 +- tests/integration/targets/ntnx_ova/tasks/create_ova.yml | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml index c87757a3f..cba6dfeb2 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml +++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml @@ -136,7 +136,7 @@ - result.response.username is defined - result.response.password is defined - result.cluster_uuid is defined - fail_msg: "fail: update cluster credeential while check_mode" + fail_msg: "fail: update cluster credential while check_mode" success_msg: "pass: Returned as expected" ################################################################ diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml index ff5245cbb..eaeea2156 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml +++ 
b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml @@ -369,7 +369,7 @@ that: - result == expected_result fail_msg: "Unable to create restore using pitr timestamp spec" - success_msg: "Spec for database restore using pitr timetsmap created successfully" + success_msg: "Spec for database restore using pitr timestamp created successfully" - name: create restore database spec with latest snapshot check_mode: yes diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml index 22b872387..2879a64f8 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml @@ -210,7 +210,7 @@ - result.response.profile.name == "{{profile1_name}}-updated1" - result.response.profile.description == "{{profile1_name}}-desc-updated" - fail_msg: "Fail: Update didn't get skipped due to no state changes" + fail_msg: "Fail: Update did not get skipped due to no state changes" success_msg: "Pass: Update skipped successfully due to no state changes" - name: create software profile version spec diff --git a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml index 96cdc4797..f9dea0812 100644 --- a/tests/integration/targets/ntnx_ova/tasks/create_ova.yml +++ b/tests/integration/targets/ntnx_ova/tasks/create_ova.yml @@ -15,8 +15,8 @@ that: - vm.response is defined - vm.response.status.state == 'COMPLETE' - fail_msg: 'Fail: Unable to create VM with minimum requirements ' - success_msg: 'Success: VM with minimum requirements created successfully ' + fail_msg: "Fail: Unable to create VM with minimum requirements " + success_msg: "Success: VM with minimum requirements created successfully " ######################################### - name: create_ova_image with check mode ntnx_vms_ova: From 6d971881326d13679bc741f5250400487c0ac56b Mon 
Sep 17 00:00:00 2001 From: george-ghawali Date: Thu, 26 Sep 2024 16:24:54 +0300 Subject: [PATCH 6/9] Adding more spelling fixes --- plugins/module_utils/entity.py | 2 +- plugins/modules/ntnx_ndb_profiles.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index 63a79c7f9..239f72262 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -344,7 +344,7 @@ def _build_url_with_query(self, url, query): def _fetch_url( self, url, method, data=None, raise_error=True, no_response=False, timeout=30 ): - # only jsonify if content-type supports, added to avoid in case of form-url-encodeded type data + # only jsonify if content-type supports, added to avoid incase of form-url-encoded type data if self.headers["Content-Type"] == "application/json" and data is not None: data = self.module.jsonify(data) diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index d842cd790..940b14740 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -311,7 +311,7 @@ autovacuum_max_workers: description: - autovacuum max workers - - default is 3 + - deadefaultfult is 3 type: int autovacuum_vacuum_cost_delay: description: @@ -1081,7 +1081,7 @@ def create_profile(module, result): result["response"] = resp uuid = resp.get("id") - # in case there is process of replication triggered, operation info is received + # incase there is process of replication triggered, operation info is received if profile_type == "software" and not uuid: uuid = resp.get("entityId") From 7f67138a1e59e2e41242ad014c4a3625928b5a00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Sj=C3=B6gren?= Date: Sun, 29 Sep 2024 16:53:17 +0200 Subject: [PATCH 7/9] plugins and tests spell checked (#491) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Thomas Sjögren --- 
plugins/module_utils/entity.py | 2 +- plugins/modules/ntnx_ndb_profiles.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index 239f72262..63a79c7f9 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -344,7 +344,7 @@ def _build_url_with_query(self, url, query): def _fetch_url( self, url, method, data=None, raise_error=True, no_response=False, timeout=30 ): - # only jsonify if content-type supports, added to avoid incase of form-url-encoded type data + # only jsonify if content-type supports, added to avoid in case of form-url-encodeded type data if self.headers["Content-Type"] == "application/json" and data is not None: data = self.module.jsonify(data) diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py index 940b14740..d842cd790 100644 --- a/plugins/modules/ntnx_ndb_profiles.py +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -311,7 +311,7 @@ autovacuum_max_workers: description: - autovacuum max workers - - deadefaultfult is 3 + - default is 3 type: int autovacuum_vacuum_cost_delay: description: @@ -1081,7 +1081,7 @@ def create_profile(module, result): result["response"] = resp uuid = resp.get("id") - # incase there is process of replication triggered, operation info is received + # in case there is process of replication triggered, operation info is received if profile_type == "software" and not uuid: uuid = resp.get("entityId") From 2295e0c5a1c8f200a53ceab5062d30ac119e9bce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Sj=C3=B6gren?= Date: Sun, 29 Sep 2024 16:53:17 +0200 Subject: [PATCH 8/9] plugins and tests spell checked (#491) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Thomas Sjögren --- plugins/module_utils/prism/protection_rules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/plugins/module_utils/prism/protection_rules.py b/plugins/module_utils/prism/protection_rules.py index 206d85c6a..5808eaae3 100644 --- a/plugins/module_utils/prism/protection_rules.py +++ b/plugins/module_utils/prism/protection_rules.py @@ -110,7 +110,7 @@ def _build_spec_schedules(self, payload, schedules): ): return ( None, - "rpo, rpo_unit, snapshot_type and at least one policy are required fields for asynchronous snapshot schedule", + "rpo, rpo_unit, snapshot_type and at least one policy are required fields for aysynchronous snapshot schedule", ) spec["recovery_point_objective_secs"], err = convert_to_secs( From 76443a33e31d4f7ee12ae5e59171fee6fb2d9e12 Mon Sep 17 00:00:00 2001 From: george-ghawali Date: Tue, 8 Oct 2024 10:52:52 +0300 Subject: [PATCH 9/9] Resolving conflicts --- examples/acp.yml | 10 +- examples/fc/fc.yml | 177 +++++---- .../node_discovery_network_info.yml | 18 +- examples/images.yml | 26 +- examples/karbon/create_registries.yml | 53 ++- examples/karbon/registries_info.yml | 16 +- examples/ndb/db_server_vms.yml | 348 +++++++++--------- examples/roles_crud.yml | 18 +- examples/vm.yml | 25 +- 9 files changed, 338 insertions(+), 353 deletions(-) diff --git a/examples/acp.yml b/examples/acp.yml index 297bc00df..1d3168200 100644 --- a/examples/acp.yml +++ b/examples/acp.yml @@ -2,8 +2,6 @@ - name: ACP playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,9 +10,9 @@ validate_certs: false tasks: - - name: Create ACP with all specifications - ntnx_acps: - validate_certs: False + - name: Create ACP with all specfactions + nutanix.ncp.ntnx_acps: + validate_certs: false state: present nutanix_host: "{{ IP }}" nutanix_username: "{{ username }}" @@ -40,7 +38,7 @@ collection: ALL - name: Delete ACP - ntnx_acps: + nutanix.ncp.ntnx_acps: state: absent acp_uuid: "{{ acp_uuid }}" register: result diff --git a/examples/fc/fc.yml b/examples/fc/fc.yml index 1053195d8..0489d25d1 
100644 --- a/examples/fc/fc.yml +++ b/examples/fc/fc.yml @@ -2,99 +2,96 @@ - name: Foundation Central Playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp - tasks: - - name: Nodes Imaging with Cluster Creation with manual mode. - ntnx_foundation_central: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - cluster_name: "test" - # skip_cluster_creation: false #set this to true to skip cluster creation - common_network_settings: - cvm_dns_servers: - - 10.x.xx.xx - hypervisor_dns_servers: - - 10.x.xx.xx - cvm_ntp_servers: - - "ntp" - hypervisor_ntp_servers: - - "ntp" - nodes_list: - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: "10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-1" - imaged_node_uuid: "" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" + - name: Nodes Imaging with Cluster Creation with manual mode. 
+ nutanix.ncp.ntnx_foundation_central: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: test + # skip_cluster_creation: false #set this to true to skip cluster creation + common_network_settings: + cvm_dns_servers: + - 10.x.xx.xx + hypervisor_dns_servers: + - 10.x.xx.xx + cvm_ntp_servers: + - ntp + hypervisor_ntp_servers: + - ntp + nodes_list: + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-1 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm - - manual_mode: - cvm_gateway: "10.xx.xx.xx" - cvm_netmask: "xx.xx.xx.xx" - cvm_ip: "10.x.xx.xx" - hypervisor_gateway: "10.x.xx.xxx" - hypervisor_netmask: "xx.xx.xx.xx" - hypervisor_ip: "10.x.x.xx" - hypervisor_hostname: "Host-2" - imaged_node_uuid: "" - use_existing_network_settings: false - ipmi_gateway: "10.x.xx.xx" - ipmi_netmask: "xx.xx.xx.xx" - ipmi_ip: "10.x.xx.xx" - image_now: true - hypervisor_type: "kvm" + - manual_mode: + cvm_gateway: 10.xx.xx.xx + cvm_netmask: xx.xx.xx.xx + cvm_ip: 10.x.xx.xx + hypervisor_gateway: 10.x.xx.xxx + hypervisor_netmask: xx.xx.xx.xx + hypervisor_ip: 10.x.x.xx + hypervisor_hostname: Host-2 + imaged_node_uuid: + use_existing_network_settings: false + ipmi_gateway: 10.x.xx.xx + ipmi_netmask: xx.xx.xx.xx + ipmi_ip: 10.x.xx.xx + image_now: true + hypervisor_type: kvm - redundancy_factor: 2 - aos_package_url: "" - hypervisor_iso_details: - url: "" - register: output + redundancy_factor: 2 + aos_package_url: + hypervisor_iso_details: + url: + register: output - - name: Nodes Imaging without Cluster Creation with discovery mode. 
- ntnx_foundation_central: - nutanix_host: "{{ pc }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - cluster_name: "test" - skip_cluster_creation: true - common_network_settings: - cvm_dns_servers: - - 10.x.xx.xx - hypervisor_dns_servers: - - 10.x.xx.xx - cvm_ntp_servers: - - "ntp" - hypervisor_ntp_servers: - - "ntp" - nodes_list: - - discovery_mode: - node_serial: "" - - discovery_mode: - node_serial: "" - - discovery_mode: - node_serial: "" - discovery_override: - cvm_ip: + - name: Nodes Imaging without Cluster Creation with discovery mode. + nutanix.ncp.ntnx_foundation_central: + nutanix_host: "{{ pc }}" + nutanix_username: "{{ username }}" + nutanix_password: "{{ password }}" + validate_certs: false + cluster_name: test + skip_cluster_creation: true + common_network_settings: + cvm_dns_servers: + - 10.x.xx.xx + hypervisor_dns_servers: + - 10.x.xx.xx + cvm_ntp_servers: + - ntp + hypervisor_ntp_servers: + - ntp + nodes_list: + - discovery_mode: + node_serial: + - discovery_mode: + node_serial: + - discovery_mode: + node_serial: + discovery_override: + cvm_ip: - redundancy_factor: 2 - aos_package_url: "" - hypervisor_iso_details: - url: "" - register: output + redundancy_factor: 2 + aos_package_url: + hypervisor_iso_details: + url: + register: output - - name: output of list - debug: - msg: '{{ output }}' + - name: Output of list + ansible.builtin.debug: + msg: "{{ output }}" diff --git a/examples/foundation/node_discovery_network_info.yml b/examples/foundation/node_discovery_network_info.yml index 27a3a550d..526d77338 100644 --- a/examples/foundation/node_discovery_network_info.yml +++ b/examples/foundation/node_discovery_network_info.yml @@ -1,25 +1,25 @@ +--- # Here we will discover nodes and also get node network info of particular some discovered nodes - name: Discover nodes and get their network info hosts: localhost gather_facts: false - collections: - - nutanix.ncp tasks: - name: Discover all nodes - 
ntnx_foundation_discover_nodes_info: - nutanix_host: "10.xx.xx.xx" - # resume line 12 to include configured(nodes part of cluster) nodes in the output - # include_configured: true + nutanix.ncp.ntnx_foundation_discover_nodes_info: + nutanix_host: 10.xx.xx.xx + # unskip line 12 to include configured(nodes part of cluster) nodes in the output + # include_configured: true register: discovered_nodes # get network info of nodes discovered from ntnx_foundation_discover_nodes_info module - name: Get node network info of some discovered nodes - ntnx_foundation_node_network_info: - nutanix_host: "10.xx.xx.xx" + nutanix.ncp.ntnx_foundation_node_network_info: + nutanix_host: 10.xx.xx.xx nodes: - "{{discovered_nodes.blocks.0.nodes.0.ipv6_address}}" - "{{discovered_nodes.blocks.1.nodes.0.ipv6_address}}" register: result - - debug: + - name: Print node network info + ansible.builtin.debug: msg: "{{ result }}" diff --git a/examples/images.yml b/examples/images.yml index 5d2db66d6..5624ac958 100644 --- a/examples/images.yml +++ b/examples/images.yml @@ -2,8 +2,6 @@ - name: Images playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,14 +10,14 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: image_uuid: "" source_path: "" source_uri: "" - clusters_name: "" + clusters_name: "" - - name: create image from local workstation - ntnx_images: + - name: Create image from local workstation + nutanix.ncp.ntnx_images: state: "present" source_path: "{{source_path}}" clusters: @@ -38,8 +36,8 @@ product_version: "1.2.0" wait: true - - name: create image from with source as remote server file location - ntnx_images: + - name: Create image from with source as remote server file location + nutanix.ncp.ntnx_images: state: "present" source_uri: "{{source_uri}}" clusters: @@ -58,8 +56,8 @@ product_version: "1.2.0" wait: true - - name: override categories of existing 
image - ntnx_images: + - name: Override categories of existing image + nutanix.ncp.ntnx_images: state: "present" image_uuid: "{{image-uuid}}" categories: @@ -69,15 +67,15 @@ - Backup wait: true - - name: detach all categories from existing image - ntnx_images: + - name: Detach all categories from existing image + nutanix.ncp.ntnx_images: state: "present" image_uuid: "00000000-0000-0000-0000-000000000000" remove_categories: true wait: true - - name: delete existing image - ntnx_images: + - name: Delete existing image + nutanix.ncp.ntnx_images: state: "absent" image_uuid: "00000000-0000-0000-0000-000000000000" wait: true diff --git a/examples/karbon/create_registries.yml b/examples/karbon/create_registries.yml index d843ace0b..5992fbee8 100644 --- a/examples/karbon/create_registries.yml +++ b/examples/karbon/create_registries.yml @@ -1,9 +1,7 @@ --- -- name: create registries +- name: Create registries hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,30 +10,31 @@ validate_certs: false tasks: - - set_fact: - registry_name: - url: - port_number: - username: - password: + - name: Set vars + ansible.builtin.set_fact: + registry_name: + url: + port_number: + username: + password: - - name: create registry - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - port: "{{port_number}}" - register: result + - name: Create registry + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + url: "{{ url }}" + port: "{{ port_number }}" + register: result - - name: delete registry - ntnx_karbon_registries: - name: "{{registry_name}}" - state: absent - register: result + - name: Delete registry + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + state: absent + register: result - - name: create registry with username and password - ntnx_karbon_registries: - name: "{{registry_name}}" - url: "{{url}}" - username: "{{username}}" - password: "{{password}}"
- register: result + - name: Create registry with username and password + nutanix.ncp.ntnx_karbon_registries: + name: "{{ registry_name }}" + url: "{{ url }}" + username: "{{ username }}" + password: "{{ password }}" + register: result diff --git a/examples/karbon/registries_info.yml b/examples/karbon/registries_info.yml index 526cabd64..935658ee6 100644 --- a/examples/karbon/registries_info.yml +++ b/examples/karbon/registries_info.yml @@ -1,9 +1,7 @@ --- -- name: get registries info +- name: Get registries info hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,11 +10,11 @@ validate_certs: false tasks: - - name: test getting all registries - ntnx_karbon_registries_info: - register: registries + - name: Test getting all registries + nutanix.ncp.ntnx_karbon_registries_info: + register: registries - - name: test getting particular register using name - ntnx_karbon_registries_info: + - name: Test getting particular register using name + nutanix.ncp.ntnx_karbon_registries_info: registry_name: "{{ registries.response[1].name }}" - register: result + register: result diff --git a/examples/ndb/db_server_vms.yml b/examples/ndb/db_server_vms.yml index 131b49457..faa0f288a 100644 --- a/examples/ndb/db_server_vms.yml +++ b/examples/ndb/db_server_vms.yml @@ -2,8 +2,6 @@ - name: NDB db server vms hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,43 +10,43 @@ validate_certs: false tasks: - - name: create spec for db server vm using time machine - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True - name: "ansible-created-vm1-from-time-machine" - desc: "ansible-created-vm1-from-time-machine-time-machine" + - name: Create spec for db server vm using time machine + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + name: ansible-created-vm1-from-time-machine + desc:
ansible-created-vm1-from-time-machine-time-machine time_machine: - uuid: "test_uuid" - snapshot_uuid: "test_snapshot_uuid" + uuid: test_uuid + snapshot_uuid: test_snapshot_uuid compute_profile: - uuid: "test_compute_uuid" + uuid: test_compute_uuid network_profile: - uuid: "test_network_uuid" + uuid: test_network_uuid cluster: - uuid: "test_cluster_uuid" - password: "test_password" - pub_ssh_key: "test_public_key" - database_type: "postgres_database" + uuid: test_cluster_uuid + password: test_password + pub_ssh_key: test_public_key + database_type: postgres_database automated_patching: maintenance_window: - uuid: "test_window_uuid" + uuid: test_window_uuid tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: check_mode_result - - name: create spec for db server vm using software profile and names of profile - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True + - name: Create spec for db server vm using software profile and names of profile + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -59,25 +57,25 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + 
post_task_cmd: ls -F register: result - - name: create db server vm using software profile - ntnx_ndb_db_server_vms: - wait: True + - name: Create db server vm using software profile + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true name: "{{ vm1_name }}" - desc: "ansible-created-vm1-desc" + desc: ansible-created-vm1-desc software_profile: name: "{{ software_profile.name }}" compute_profile: @@ -88,226 +86,226 @@ name: "{{ cluster.cluster1.name }}" password: "{{ vm_password }}" pub_ssh_key: "{{ public_ssh_key }}" - time_zone: "UTC" - database_type: "postgres_database" + time_zone: UTC + database_type: postgres_database automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result - - name: update db server vm name, desc, credentials, tags - ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" - name: "{{vm1_name_updated}}" - desc: "ansible-created-vm1-updated-desc" - reset_name_in_ntnx_cluster: True - reset_desc_in_ntnx_cluster: True + - name: Update db server vm name, desc, credentials, tags + nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + uuid: "{{ db_server_uuid }}" + name: "{{ vm1_name_updated }}" + desc: ansible-created-vm1-updated-desc + reset_name_in_ntnx_cluster: true + reset_desc_in_ntnx_cluster: true update_credentials: - - username: "{{vm_username}}" - password: "{{vm_password}}" + - username: "{{ vm_username }}" + password: "{{ vm_password }}" tags: ansible-db-server-vms: ansible-updated register: result - - name: create spec for update db server vm credentials - check_mode: yes - ntnx_ndb_db_server_vms: - wait: True - uuid: "{{db_server_uuid}}" + - name: Create spec for update db server vm credentials + check_mode: true + 
nutanix.ncp.ntnx_ndb_db_server_vms: + wait: true + uuid: "{{ db_server_uuid }}" update_credentials: - - username: "user" - password: "pass" + - username: user + password: pass register: result - name: List NDB db_servers - ntnx_ndb_db_servers_info: + nutanix.ncp.ntnx_ndb_db_servers_info: register: db_servers - - name: get NDB db_servers using it's name - ntnx_ndb_db_servers_info: + - name: Get NDB db_servers using it's name + nutanix.ncp.ntnx_ndb_db_servers_info: filters: load_metrics: true - load_databases: True + load_databases: true value_type: name - value: "{{db_servers.response[0].name}}" + value: "{{ db_servers.response[0].name }}" register: result - - name: get NDB db_servers using it's ip - ntnx_ndb_db_servers_info: + - name: Get NDB db_servers using it's ip + nutanix.ncp.ntnx_ndb_db_servers_info: filters: value_type: ip - value: "{{db_servers.response[0].ipAddresses[0]}}" + value: "{{ db_servers.response[0].ipAddresses[0] }}" register: result - - name: get NDB db_servers using it's name - ntnx_ndb_db_servers_info: - name: "{{db_servers.response[0].name}}" + - name: Get NDB db_servers using it's name + nutanix.ncp.ntnx_ndb_db_servers_info: + name: "{{ db_servers.response[0].name }}" register: result - - name: get NDB db_servers using it's id - ntnx_ndb_db_servers_info: - uuid: "{{db_servers.response[0].id}}" + - name: Get NDB db_servers using it's id + nutanix.ncp.ntnx_ndb_db_servers_info: + uuid: "{{ db_servers.response[0].id }}" register: result - - name: get NDB db_servers using ip - ntnx_ndb_db_servers_info: - server_ip: "{{db_servers.response[0].ipAddresses[0]}}" + - name: Get NDB db_servers using ip + nutanix.ncp.ntnx_ndb_db_servers_info: + server_ip: "{{ db_servers.response[0].ipAddresses[0] }}" register: result ################################### maintenance tasks update tasks ############################# - - name: create spec for adding maintenance window tasks to db server vm - check_mode: yes - ntnx_ndb_maintenance_tasks: + - name: Create spec 
for adding maintenance window tasks to db server vm + check_mode: true + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" - - uuid: "test_vm_1" + - name: "{{ vm1_name_updated }}" + - uuid: test_vm_1 db_server_clusters: - - uuid: "test_cluster_1" - - uuid: "test_cluster_2" + - uuid: test_cluster_1 + - uuid: test_cluster_2 maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" - - type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result - - name: create spec for removing maintenance window tasks from above created vm - check_mode: yes - ntnx_ndb_maintenance_tasks: + - name: Create spec for removing maintenance window tasks from above created vm + check_mode: true + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result - - name: remove maintenance tasks - ntnx_ndb_maintenance_tasks: + - name: Remove maintenance tasks + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - uuid: "{{db_server_uuid}}" + - uuid: "{{ db_server_uuid }}" maintenance_window: - uuid: "{{maintenance.window_uuid}}" + uuid: "{{ maintenance.window_uuid }}" tasks: [] register: result - - name: Add maintenance window task for vm - ntnx_ndb_maintenance_tasks: + - name: Add maintenance window task for vm + nutanix.ncp.ntnx_ndb_maintenance_tasks: db_server_vms: - - name: "{{vm1_name_updated}}" + - name: "{{ vm1_name_updated }}" maintenance_window: - name: "{{maintenance.window_name}}" + name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" -
type: "DB_PATCHING" - pre_task_cmd: "ls -a" - post_task_cmd: "ls" + - type: OS_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls + - type: DB_PATCHING + pre_task_cmd: ls -a + post_task_cmd: ls register: result ################################### DB server VM unregistration tasks ############################# - - name: generate check mode spec for unregister with default values - check_mode: yes - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + - name: Generate check mode spec for unregister with default values + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" register: result - - name: generate check mode spec for delete vm with vgs and snapshots - check_mode: yes - ntnx_ndb_db_server_vms: - state: "absent" - uuid: "{{db_server_uuid}}" - delete_from_cluster: True - delete_vgs: True - delete_vm_snapshots: True + - name: Generate check mode spec for delete vm with vgs and snapshots + check_mode: true + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + uuid: "{{ db_server_uuid }}" + delete_from_cluster: true + delete_vgs: true + delete_vm_snapshots: true register: result - - name: unregister vm - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" - delete_from_cluster: False - soft_remove: True - delete_vgs: True - delete_vm_snapshots: True + - name: Unregister vm + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" + delete_from_cluster: false + soft_remove: true + delete_vgs: true + delete_vm_snapshots: true register: result ################################### DB server VM Registration tasks ############################# - - name: generate spec for registration of the previous unregistered vm using check mode - check_mode: yes - ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + - name: Generate spec for registration of the previous unregistered vm
using check mode + check_mode: true + nutanix.ncp.ntnx_ndb_register_db_server_vm: + ip: "{{ vm_ip }}" + desc: register-vm-desc reset_desc_in_ntnx_cluster: true cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: - software_path: "{{postgres.software_home}}" - private_ssh_key: "check-key" - username: "{{vm_username}}" + software_path: "{{ postgres.software_home }}" + private_ssh_key: check-key + username: "{{ vm_username }}" automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" - working_directory: "/check" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F + working_directory: /check register: result - - name: register the previous unregistered vm - ntnx_ndb_register_db_server_vm: - ip: "{{vm_ip}}" - desc: "register-vm-desc" + - name: Register the previous unregistered vm + nutanix.ncp.ntnx_ndb_register_db_server_vm: + ip: "{{ vm_ip }}" + desc: register-vm-desc cluster: - name: "{{cluster.cluster1.name}}" + name: "{{ cluster.cluster1.name }}" postgres: listener_port: 5432 - software_path: "{{postgres.software_home}}" - username: "{{vm_username}}" - password: "{{vm_password}}" + software_path: "{{ postgres.software_home }}" + username: "{{ vm_username }}" + password: "{{ vm_password }}" automated_patching: maintenance_window: name: "{{ maintenance.window_name }}" tasks: - - type: "OS_PATCHING" - pre_task_cmd: "ls" - post_task_cmd: "ls -a" - - type: "DB_PATCHING" - pre_task_cmd: "ls -l" - post_task_cmd: "ls -F" + - type: OS_PATCHING + pre_task_cmd: ls + post_task_cmd: ls -a + - type: DB_PATCHING + pre_task_cmd: ls -l + post_task_cmd: ls -F register: result ################################### DB server VM Delete tasks ############################# - - name: unregister db server 
vm - ntnx_ndb_db_server_vms: - state: "absent" - wait: True - uuid: "{{db_server_uuid}}" + - name: Unregister db server vm + nutanix.ncp.ntnx_ndb_db_server_vms: + state: absent + wait: true + uuid: "{{ db_server_uuid }}" delete_from_cluster: false - delete_vgs: True - delete_vm_snapshots: True + delete_vgs: true + delete_vm_snapshots: true register: result diff --git a/examples/roles_crud.yml b/examples/roles_crud.yml index b443afccc..d364c804f 100644 --- a/examples/roles_crud.yml +++ b/examples/roles_crud.yml @@ -1,8 +1,6 @@ - name: Roles crud playbook. Here we will create, update, read and delete the role. hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -10,12 +8,12 @@ nutanix_password: validate_certs: false tasks: - - name: get some permissions for adding in roles - ntnx_permissions_info: + - name: Get some permissions for adding in roles + nutanix.ncp.ntnx_permissions_info: register: permissions - - name: Create a role with 2 permissions. Here we will be using name or uuid for referencing permissions - ntnx_roles: + - name: Create a role with 2 permissions. Here we will be using name or uuid for referencing permissions + nutanix.ncp.ntnx_roles: state: present name: test-ansible-role-1 desc: @@ -26,7 +24,7 @@ register: role1 - name: Update role - ntnx_roles: + nutanix.ncp.ntnx_roles: state: present role_uuid: "{{ role1.role_uuid }}" name: test-ansible-role-1 @@ -36,16 +34,16 @@ register: updated_role1 - name: Read the updated role - ntnx_roles_info: + nutanix.ncp.ntnx_roles_info: role_uuid: "{{ updated_role1.role_uuid }}" register: role1_info - name: Print the role details - debug: + ansible.builtin.debug: msg: "{{role1_info}}" - name: Delete the role.
- ntnx_roles: + nutanix.ncp.ntnx_roles: state: absent role_uuid: "{{ updated_role1.role_uuid }}" wait: true diff --git a/examples/vm.yml b/examples/vm.yml index 079409f61..e6c83b471 100644 --- a/examples/vm.yml +++ b/examples/vm.yml @@ -2,8 +2,6 @@ - name: VM playbook hosts: localhost gather_facts: false - collections: - - nutanix.ncp module_defaults: group/nutanix.ncp.ntnx: nutanix_host: @@ -12,7 +10,7 @@ validate_certs: false tasks: - name: Setting Variables - set_fact: + ansible.builtin.set_fact: cluster_name: "" script_path: "" subnet_name: "" @@ -21,7 +19,8 @@ fqdn: "" - name: Create Cloud-init Script file - copy: + ansible.builtin.copy: + mode: "0644" dest: "cloud_init.yml" content: | #cloud-config @@ -31,8 +30,8 @@ expire: False fqdn: "{{ fqdn }}" - - name: create Vm - ntnx_vms: + - name: Create Vm + nutanix.ncp.ntnx_vms: state: present name: "ansible_automation_demo" desc: "ansible_vm_description" @@ -42,10 +41,10 @@ cluster: name: "{{cluster_name}}" networks: - - is_connected: True + - is_connected: true subnet: name: "{{ subnet_name }}" - # mention cluster only when there are multiple subnets with same name across clusters + # mention cluster only when there are multiple subnets with same name across clusters # and subnet name is set above cluster: name: "{{cluster_name}}" @@ -61,14 +60,14 @@ guest_customization: type: "cloud_init" script_path: "./cloud_init.yml" - is_overridable: True + is_overridable: true register: output - - name: output of vm created - debug: + - name: Output of vm created + ansible.builtin.debug: msg: "{{ output }}" - - name: delete VM - ntnx_vms: + - name: Delete VM + nutanix.ncp.ntnx_vms: state: absent vm_uuid: "{{output.vm_uuid}}"