From 4757dd4262db9ce2c4743aebb8bfb5486e29b6a4 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Thu, 26 May 2022 19:12:05 -0400 Subject: [PATCH] refactor: Remove `aws-resource-tags` module and its usage from blueprints (#551) --- .github/workflows/e2e-parallel-destroy.yml | 95 ++----- .github/workflows/e2e-parallel-full.yml | 121 ++------- .pre-commit-config.yaml | 2 +- README.md | 7 +- data.tf | 9 +- deploy/e2e-template/README.md | 1 - deploy/e2e-template/backend.conf | 3 - deploy/e2e-template/base.tfvars | 3 - deploy/e2e-template/main.tf | 11 - deploy/e2e-template/outputs.tf | 0 deploy/e2e-template/variables.tf | 14 - deploy/e2e-template/versions.tf | 12 - docs/getting-started.md | 4 +- docs/internal/ci.md | 2 +- docs/node-groups.md | 16 +- examples/analytics/emr-on-eks/README.md | 2 - examples/analytics/emr-on-eks/main.tf | 147 ++++++----- examples/analytics/emr-on-eks/variables.tf | 17 -- examples/analytics/emr-on-eks/versions.tf | 9 +- examples/analytics/spark-k8s-operator/main.tf | 149 ++++++----- .../analytics/spark-k8s-operator/variables.tf | 17 -- .../analytics/spark-k8s-operator/versions.tf | 9 +- examples/aws-efs-csi-driver/main.tf | 143 ++++++----- examples/aws-efs-csi-driver/variables.tf | 17 -- examples/aws-efs-csi-driver/versions.tf | 9 +- examples/ci-cd/gitlab-ci-cd/README.md | 5 +- examples/ci-cd/gitlab-ci-cd/main.tf | 98 +++---- examples/ci-cd/gitlab-ci-cd/variables.tf | 17 -- examples/complete-kubernetes-addons/main.tf | 138 +++++----- .../complete-kubernetes-addons/variables.tf | 17 -- .../complete-kubernetes-addons/versions.tf | 9 +- examples/crossplane/main.tf | 114 +++++---- examples/crossplane/variables.tf | 17 -- examples/crossplane/versions.tf | 9 +- .../eks-cluster-with-external-dns/main.tf | 137 +++++----- .../variables.tf | 3 +- .../eks-cluster-with-external-dns/versions.tf | 9 +- examples/eks-cluster-with-new-vpc/main.tf | 111 ++++---- examples/eks-cluster-with-new-vpc/outputs.tf | 6 +- .../eks-cluster-with-new-vpc/variables.tf | 19 +- examples/eks-cluster-with-new-vpc/versions.tf | 9 +- examples/fully-private-eks-cluster/main.tf | 239 ++++++++---------- .../fully-private-eks-cluster/variables.tf | 17 -- .../fully-private-eks-cluster/versions.tf | 9 +- .../game-tech/agones-game-controller/main.tf | 146 +++++------ .../agones-game-controller/variables.tf | 17 -- .../agones-game-controller/versions.tf | 9 +- examples/gitops/argocd/main.tf | 186 ++++++-------- examples/gitops/argocd/variables.tf | 17 -- examples/gitops/argocd/versions.tf | 9 +- examples/ingress-controllers/nginx/README.md | 16 +- examples/ingress-controllers/nginx/main.tf | 131 +++++----- .../ingress-controllers/nginx/variables.tf | 17 -- .../ingress-controllers/nginx/versions.tf | 9 +- examples/ipv6-eks-cluster/main.tf | 134 +++++----- examples/ipv6-eks-cluster/versions.tf | 9 +- examples/karpenter/main.tf | 190 +++++++------- examples/karpenter/variables.tf | 17 -- examples/karpenter/versions.tf | 9 +- examples/multi-tenancy-with-teams/main.tf | 117 +++++---- .../multi-tenancy-with-teams/variables.tf | 17 -- examples/multi-tenancy-with-teams/versions.tf | 9 +- examples/node-groups/fargate-profiles/main.tf | 108 ++++---- .../node-groups/fargate-profiles/versions.tf | 9 +- .../node-groups/managed-node-groups/main.tf | 143 ++++++----- .../managed-node-groups/variables.tf | 17 -- .../managed-node-groups/versions.tf | 9 +- .../self-managed-node-groups/main.tf | 122 +++++---- .../self-managed-node-groups/variables.tf | 17 -- .../self-managed-node-groups/versions.tf | 9 +- 
.../node-groups/windows-node-groups/README.md | 4 +- .../node-groups/windows-node-groups/main.tf | 122 ++++----- .../windows-node-groups/variables.tf | 17 -- .../windows-node-groups/versions.tf | 9 +- .../adot-amp-grafana-for-haproxy/main.tf | 111 ++++---- .../adot-amp-grafana-for-haproxy/variables.tf | 18 -- .../adot-amp-grafana-for-haproxy/versions.tf | 9 +- .../adot-amp-grafana-for-java/main.tf | 113 +++++---- .../adot-amp-grafana-for-java/variables.tf | 18 -- .../adot-amp-grafana-for-java/versions.tf | 9 +- .../adot-amp-grafana-for-memcached/main.tf | 110 ++++---- .../variables.tf | 18 -- .../versions.tf | 9 +- .../adot-amp-grafana-for-nginx/main.tf | 121 ++++----- .../adot-amp-grafana-for-nginx/variables.tf | 18 -- .../adot-amp-grafana-for-nginx/versions.tf | 9 +- .../observability/amp-amg-opensearch/main.tf | 128 +++++----- .../amp-amg-opensearch/versions.tf | 9 +- examples/tls-with-aws-pca-issuer/main.tf | 138 +++++----- examples/tls-with-aws-pca-issuer/variables.tf | 18 -- examples/tls-with-aws-pca-issuer/versions.tf | 13 +- locals.tf | 19 +- main.tf | 28 +- modules/aws-eks-managed-node-groups/locals.tf | 8 +- .../locals.tf | 4 +- modules/aws-eks-teams/README.md | 3 - modules/aws-eks-teams/locals.tf | 1 - modules/aws-eks-teams/main.tf | 27 +- modules/aws-eks-teams/variables.tf | 15 -- modules/aws-resource-tags/README.md | 37 --- modules/aws-resource-tags/main.tf | 21 -- modules/aws-resource-tags/outputs.tf | 9 - modules/aws-resource-tags/variables.tf | 32 --- modules/aws-resource-tags/versions.tf | 3 - modules/kubernetes-addons/argocd/locals.tf | 3 +- .../aws-load-balancer-controller/locals.tf | 2 +- modules/kubernetes-addons/helm-addon/main.tf | 2 +- .../kubernetes-addons/ingress-nginx/locals.tf | 1 + modules/launch-templates/README.md | 10 +- modules/launch-templates/locals.tf | 2 +- outputs.tf | 5 + test/src/eks_blueprints_e2e_test.go | 12 +- variables.tf | 39 +-- 113 files changed, 2011 insertions(+), 2588 deletions(-) delete mode 100644 deploy/e2e-template/README.md delete mode 100644 deploy/e2e-template/backend.conf delete mode 100644 deploy/e2e-template/base.tfvars delete mode 100644 deploy/e2e-template/main.tf delete mode 100644 deploy/e2e-template/outputs.tf delete mode 100644 deploy/e2e-template/variables.tf delete mode 100644 deploy/e2e-template/versions.tf delete mode 100644 modules/aws-resource-tags/README.md delete mode 100644 modules/aws-resource-tags/main.tf delete mode 100644 modules/aws-resource-tags/outputs.tf delete mode 100644 modules/aws-resource-tags/variables.tf delete mode 100644 modules/aws-resource-tags/versions.tf diff --git a/.github/workflows/e2e-parallel-destroy.yml b/.github/workflows/e2e-parallel-destroy.yml index aa8a41c10d..56caae8f30 100644 --- a/.github/workflows/e2e-parallel-destroy.yml +++ b/.github/workflows/e2e-parallel-destroy.yml @@ -8,9 +8,6 @@ on: required: true default: 'true' -env: - DEFAULT_DEPLOY_ORDER: 'module.e2e_test.module.aws_vpc,module.e2e_test.module.eks_blueprints,module.e2e_test.module.eks_blueprints_kubernetes_addons' - concurrency: e2e-parallel-destroy jobs: @@ -27,82 +24,30 @@ jobs: matrix: include: - example_path: examples/analytics/emr-on-eks - tenant_name: 'emr-eks' - example_path: examples/analytics/spark-k8s-operator - tenant_name: 'spark' - - example_path: examples/gitops/argocd - tenant_name: 'argo' - example_path: examples/aws-efs-csi-driver - tenant_name: 'efs' - example_path: examples/crossplane - tenant_name: 'crossplane' - example_path: examples/eks-cluster-with-new-vpc - tenant_name: 'new-vpc' - example_path: 
examples/fully-private-eks-cluster - tenant_name: 'private' - deployment_order: - [ - 'module.e2e_test.module.aws_vpc', - 'module.e2e_test.module.vpc_endpoint_gateway', - 'module.e2e_test.module.vpc_endpoints', - 'module.e2e_test.module.eks_blueprints', - ] - example_path: examples/game-tech/agones-game-controller - tenant_name: 'agones' - - example_path: examples/ingress-controllers/nginx - tenant_name: 'nginx' - deployment_order: - [ - 'module.e2e_test.module.aws_vpc', - 'module.e2e_test.module.eks_blueprints', - 'module.e2e_test.module.eks_blueprints_kubernetes_addons', - 'module.e2e_test.module.aws_load_balancer_controller', - 'module.e2e_test.module.ingress_nginx', - ] + - example_path: examples/gitops/argocd + # - example_path: examples/ingress-controllers/nginx # ignoring due to https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/1629 + - example_path: examples/ipv6-eks-cluster - example_path: examples/karpenter - tenant_name: 'karpenter' - - example_path: examples/node-groups/managed-node-groups - tenant_name: 'managed' - example_path: examples/multi-tenancy-with-teams - tenant_name: 'teams' + - example_path: examples/node-groups/fargate-profiles + - example_path: examples/node-groups/managed-node-groups - example_path: examples/node-groups/self-managed-node-groups - tenant_name: 'self' - example_path: examples/node-groups/windows-node-groups - tenant_name: 'windows' steps: - name: Checkout uses: actions/checkout@v2 - - name: Pre Setup - id: pre-setup - run: | - mkdir -p deploy/e2e-test - if [[ ${{ matrix.example_path }} == deploy/* ]] - then - echo "Skipping pre-setup for ${{ matrix.example_path }}" - cp -R ${{ matrix.example_path }}/* deploy/e2e-test/ - else - echo "Running pre-setup for ${{ matrix.example_path }}" - cp -R deploy/e2e-template/* deploy/e2e-test/ - sed -i "s!REPLACE_ME!${{ matrix.tenant_name }}!g" deploy/e2e-test/base.tfvars - sed -i "s!TF_STATE_PATH!${{ matrix.example_path }}!g" deploy/e2e-test/backend.conf - sed -i "s!EXAMPLE_PATH!${{ matrix.example_path }}!g" deploy/e2e-test/main.tf - fi - - - name: Setup Deployment Order - id: setup-deployment-order - run: | - export MATRIX_DEPLOYMENT_ORDER=${{join(matrix.deployment_order,',') }} + - name: Setup backend + # Un-comment remote backend for use in workflow + run: sed -i "s/# //g" ${{ matrix.example_path }}/versions.tf - if [ -z "$MATRIX_DEPLOYMENT_ORDER" ] - then - DEPLOYMENT_ORDER=${{env.DEFAULT_DEPLOY_ORDER}} - else - DEPLOYMENT_ORDER=$MATRIX_DEPLOYMENT_ORDER - fi - echo "DEPLOYMENT_ORDER=$DEPLOYMENT_ORDER" >> $GITHUB_ENV - - - name: Configure AWS credentials from Test account + - name: Auth AWS uses: aws-actions/configure-aws-credentials@v1 with: role-to-assume: ${{ secrets.ROLE_TO_ASSUME }} @@ -110,25 +55,15 @@ jobs: role-duration-seconds: 3600 role-session-name: GithubActions-Session - - name: Terraform Job + - name: Setup Terraform uses: hashicorp/setup-terraform@v1 with: terraform_version: 1.0.0 - - name: Terraform Init - id: init - run: terraform init -backend-config backend.conf -reconfigure - working-directory: deploy/e2e-test - - name: Terraform Destroy - id: destroy - working-directory: deploy/e2e-test + working-directory: ${{ matrix.example_path }} run: | - reverse_array=$(echo ${{ env.DEPLOYMENT_ORDER }} | awk -F, '{for (i=NF; i>0; --i) printf "%s%s", (i> $GITHUB_ENV + - name: Setup backend + # Un-comment remote backend for use in workflow + run: sed -i "s/# //g" ${{ matrix.example_path }}/versions.tf - - name: Configure AWS credentials from Test account + - name: Auth AWS uses: 
aws-actions/configure-aws-credentials@v1 with: role-to-assume: ${{ secrets.ROLE_TO_ASSUME }} @@ -109,58 +55,29 @@ jobs: role-duration-seconds: 3600 role-session-name: GithubActions-Session - - name: Terraform Job + - name: Setup Terraform uses: hashicorp/setup-terraform@v1 with: terraform_version: 1.0.0 - - name: Terraform Fmt - id: fmt - run: terraform fmt -check -recursive -list -no-color - - - name: Terraform Init - id: init - run: terraform init -backend-config backend.conf -reconfigure - working-directory: deploy/e2e-test - - - name: Terraform Validate - id: validate - working-directory: deploy/e2e-test - run: terraform validate -no-color - - - name: Terraform Plan - id: plan - working-directory: deploy/e2e-test - run: terraform plan -var-file base.tfvars -no-color - - name: Terraform Apply id: apply - working-directory: deploy/e2e-test + working-directory: ${{ matrix.example_path }} run: | - IFS=',' read -r -a array <<< "${{ env.DEPLOYMENT_ORDER }}" - for element in "${array[@]}" - do - terraform apply -target="$element" -var-file base.tfvars -no-color -input=false -auto-approve - done - terraform apply -var-file base.tfvars -no-color -input=false -auto-approve + terraform init -upgrade=true + terraform apply -no-color -input=false -auto-approve - name: Terraform Destroy if: github.event.inputs.TFDestroy == 'true' && (steps.apply.outcome == 'success' || steps.apply.outcome == 'failure') - id: destroy - working-directory: deploy/e2e-test + working-directory: ${{ matrix.example_path }} run: | - reverse_array=$(echo ${{ env.DEPLOYMENT_ORDER }} | awk -F, '{for (i=NF; i>0; --i) printf "%s%s", (i [aws\_eks\_self\_managed\_node\_groups](#module\_aws\_eks\_self\_managed\_node\_groups) | ./modules/aws-eks-self-managed-node-groups | n/a | | [aws\_eks\_teams](#module\_aws\_eks\_teams) | ./modules/aws-eks-teams | n/a | | [aws\_managed\_prometheus](#module\_aws\_managed\_prometheus) | ./modules/aws-managed-prometheus | n/a | -| [eks\_tags](#module\_eks\_tags) | ./modules/aws-resource-tags | n/a | | [emr\_on\_eks](#module\_emr\_on\_eks) | ./modules/emr-on-eks | n/a | | [kms](#module\_kms) | ./modules/aws-kms | n/a | @@ -193,7 +192,6 @@ If you are interested in contributing to EKS Blueprints, see the [Contribution g | [enable\_emr\_on\_eks](#input\_enable\_emr\_on\_eks) | Enable EMR on EKS | `bool` | `false` | no | | [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `true` | no | | [enable\_windows\_support](#input\_enable\_windows\_support) | Enable Windows support | `bool` | `false` | no | -| [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | `"preprod"` | no | | [fargate\_profiles](#input\_fargate\_profiles) | Fargate profile configuration | `any` | `{}` | no | | [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | | [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no | @@ -207,17 +205,13 @@ If you are interested in contributing to EKS Blueprints, see the [Contribution g | [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. 
Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no | | [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no | | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | -| [org](#input\_org) | tenant, which could be your organization name, e.g. aws' | `string` | `""` | no | | [platform\_teams](#input\_platform\_teams) | Map of maps of platform teams to create | `any` | `{}` | no | | [private\_subnet\_ids](#input\_private\_subnet\_ids) | List of private subnets Ids for the cluster and worker nodes | `list(string)` | `[]` | no | | [public\_subnet\_ids](#input\_public\_subnet\_ids) | List of public subnets Ids for the worker nodes | `list(string)` | `[]` | no | | [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Self-managed node groups configuration | `any` | `{}` | no | | [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit`,`XYZ`) | `map(string)` | `{}` | no | -| [tenant](#input\_tenant) | Account name or unique account id e.g., apps or management or aws007 | `string` | `"aws"` | no | -| [terraform\_version](#input\_terraform\_version) | Terraform version | `string` | `"Terraform"` | no | | [vpc\_id](#input\_vpc\_id) | VPC Id | `string` | n/a | yes | | [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no | -| [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... | `string` | `"dev"` | no | ## Outputs @@ -233,6 +227,7 @@ If you are interested in contributing to EKS Blueprints, see the [Contribution g | [eks\_cluster\_endpoint](#output\_eks\_cluster\_endpoint) | Endpoint for your Kubernetes API server | | [eks\_cluster\_id](#output\_eks\_cluster\_id) | Amazon EKS Cluster Name | | [eks\_cluster\_status](#output\_eks\_cluster\_status) | Amazon EKS Cluster Status | +| [eks\_cluster\_version](#output\_eks\_cluster\_version) | The Kubernetes version for the cluster | | [eks\_oidc\_issuer\_url](#output\_eks\_oidc\_issuer\_url) | The URL on the EKS cluster OIDC Issuer | | [eks\_oidc\_provider\_arn](#output\_eks\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. 
| | [emr\_on\_eks\_role\_arn](#output\_emr\_on\_eks\_role\_arn) | IAM execution role ARN for EMR on EKS | diff --git a/data.tf b/data.tf index e1950b7509..62a282d881 100644 --- a/data.tf +++ b/data.tf @@ -86,10 +86,7 @@ data "aws_iam_policy_document" "eks_key" { type = "AWS" identifiers = concat( var.cluster_kms_key_additional_admin_arns, - [ - "arn:${local.context.aws_partition_id}:iam::${local.context.aws_caller_identity_account_id}:role/${local.cluster_iam_role_name}", - data.aws_iam_session_context.current.issuer_arn - ] + [data.aws_iam_session_context.current.issuer_arn] ) } } @@ -109,7 +106,7 @@ data "aws_iam_policy_document" "eks_key" { principals { type = "AWS" identifiers = [ - "arn:${local.context.aws_partition_id}:iam::${local.context.aws_caller_identity_account_id}:role/${local.cluster_iam_role_name}" + local.cluster_iam_role_arn ] } } @@ -129,7 +126,7 @@ data "aws_iam_policy_document" "eks_key" { principals { type = "AWS" identifiers = [ - "arn:${local.context.aws_partition_id}:iam::${local.context.aws_caller_identity_account_id}:role/${local.cluster_iam_role_name}" + local.cluster_iam_role_arn ] } diff --git a/deploy/e2e-template/README.md b/deploy/e2e-template/README.md deleted file mode 100644 index 8f04b05adb..0000000000 --- a/deploy/e2e-template/README.md +++ /dev/null @@ -1 +0,0 @@ -# Usage diff --git a/deploy/e2e-template/backend.conf b/deploy/e2e-template/backend.conf deleted file mode 100644 index 20670de67e..0000000000 --- a/deploy/e2e-template/backend.conf +++ /dev/null @@ -1,3 +0,0 @@ -bucket = "terraform-ssp-github-actions-state" -region = "us-west-2" -key = "e2e/TF_STATE_PATH.tfstate" diff --git a/deploy/e2e-template/base.tfvars b/deploy/e2e-template/base.tfvars deleted file mode 100644 index d38e677a56..0000000000 --- a/deploy/e2e-template/base.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -tenant = "REPLACE_ME" -environment = "preprod" -zone = "test" diff --git a/deploy/e2e-template/main.tf b/deploy/e2e-template/main.tf deleted file mode 100644 index b2842f330a..0000000000 --- a/deploy/e2e-template/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { - region = "us-west-2" -} - -module "e2e_test" { - source = "../../EXAMPLE_PATH" - - tenant = var.tenant - environment = var.environment - zone = var.zone -} diff --git a/deploy/e2e-template/outputs.tf b/deploy/e2e-template/outputs.tf deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/deploy/e2e-template/variables.tf b/deploy/e2e-template/variables.tf deleted file mode 100644 index 0b273ac2f8..0000000000 --- a/deploy/e2e-template/variables.tf +++ /dev/null @@ -1,14 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" -} - -variable "environment" { - type = string - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." 
-} diff --git a/deploy/e2e-template/versions.tf b/deploy/e2e-template/versions.tf deleted file mode 100644 index e10a6b663e..0000000000 --- a/deploy/e2e-template/versions.tf +++ /dev/null @@ -1,12 +0,0 @@ -terraform { - required_version = ">= 1.0.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.72" - } - } - - backend "s3" {} -} diff --git a/docs/getting-started.md b/docs/getting-started.md index a20ccd9ab1..6cde3993ca 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -56,7 +56,7 @@ We will leverage Terraform's [target](https://learn.hashicorp.com/tutorials/terr **Deploy the VPC**. This step will take roughly 3 minutes to complete. ``` -terraform apply -target="module.aws_vpc" +terraform apply -target="module.vpc" ``` **Deploy the EKS cluster**. This step will take roughly 14 minutes to complete. @@ -145,5 +145,5 @@ terraform destroy -target="module.eks_blueprints" Destroy the VPC. ``` -terraform destroy -target="module.aws_vpc" +terraform destroy -target="module.vpc" ``` diff --git a/docs/internal/ci.md b/docs/internal/ci.md index 08a5dc2625..4ad0d89c6c 100644 --- a/docs/internal/ci.md +++ b/docs/internal/ci.md @@ -1,6 +1,6 @@ # E2E tests -We use GitHub Actions to run an end-to-end tests to verify all PRs. The GitHub Actions used are a combination of `aws-actions/configure-aws-credentials` and `hashicorp/setup-terraform@v1`. See the complete action definition [here](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/.github/workflows/e2e-test.yml). +We use GitHub Actions to run end-to-end tests to verify all PRs. The GitHub Actions used are a combination of `aws-actions/configure-aws-credentials` and `hashicorp/setup-terraform@v1`. See the complete action definition [here](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/.github/workflows/e2e-terratest.yml). ## Setup diff --git a/docs/node-groups.md b/docs/node-groups.md index d06d07d640..0655b400d5 100644 --- a/docs/node-groups.md +++ b/docs/node-groups.md @@ -103,7 +103,7 @@ The below example demonstrates advanced configuration options for a managed node { device_name = "/dev/xvdf" # mount point to /local1 (it could be local2, depending upon the disks are attached during boot) volume_type = "gp3" # The volume type. Can be standard, gp2, gp3, io1, io2, sc1 or st1 (Default: gp3). - volume_size = "100" + volume_size = 100 delete_on_termination = true encrypted = true kms_key_id = "" # Custom KMS Key can be used to encrypt the disk @@ -467,10 +467,10 @@ The below example demonstrates the minimum configuration required to deploy a Se ```hcl # EKS SELF MANAGED NODE GROUPS self_managed_node_groups = { - self_mg_4 = { + self_mg_5 = { node_group_name = "self-managed-ondemand" launch_template_os = "amazonlinux2eks" - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } ``` @@ -481,7 +481,7 @@ The below example demonstrates advanced configuration options for a self-managed ```hcl self_managed_node_groups = { - self_mg_4 = { + self_mg_5 = { node_group_name = "self-managed-ondemand" instance_type = "m5.large" custom_ami_id = "ami-0dfaa019a300f219c" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc. 
@@ -524,8 +524,8 @@ The below example demonstrates advanced configuration options for a self-managed public_ip = false # Enable only for public subnets # AUTOSCALING - max_size = "3" - min_size = "1" + max_size = 3 + min_size = 1 subnet_ids = [] # Mandatory Public or Private Subnet IDs additional_tags = { ExtraTag = "m5x-on-demand" @@ -618,8 +618,8 @@ The example below demonstrates the minimum configuration required to deploy a Se node_group_name = "ng-od-windows" launch_template_os = "windows" instance_type = "m5n.large" - subnet_ids = module.aws_vpc.private_subnets - min_size = "2" + subnet_ids = module.vpc.private_subnets + min_size = 2 } } ``` diff --git a/examples/analytics/emr-on-eks/README.md b/examples/analytics/emr-on-eks/README.md index e1b6f8be1c..0433af74f4 100644 --- a/examples/analytics/emr-on-eks/README.md +++ b/examples/analytics/emr-on-eks/README.md @@ -2,8 +2,6 @@ This example deploys the following resources -- Creates a new sample VPC, 3 Private Subnets and 3 Public Subnets -- Creates Internet gateway for Public Subnets and NAT Gateway for Private Subnets - Creates EKS Cluster Control plane with public endpoint (for demo purpose only) with one managed node group - Deploys Metrics server, Cluster Autoscaler, Prometheus and EMR on EKS Addon - Creates Amazon managed Prometheus and configures Prometheus addon to remote write metrics to AMP diff --git a/examples/analytics/emr-on-eks/main.tf b/examples/analytics/emr-on-eks/main.tf index 9b3b8c6dc0..d6dba01c57 100644 --- a/examples/analytics/emr-on-eks/main.tf +++ b/examples/analytics/emr-on-eks/main.tf @@ -31,73 +31,41 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # EKS MANAGED NODE GROUPS + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" instance_types = ["m5.xlarge"] - min_size = "3" - subnet_ids = module.aws_vpc.private_subnets + min_size = 3 + subnet_ids = module.vpc.private_subnets } } + + enable_amazon_prometheus = true + #--------------------------------------- # ENABLE EMR ON EKS # 1. Creates namespace @@ -107,7 +75,6 @@ module "eks_blueprints" { # 5. Create a trust relationship between the job execution role and the identity of the EMR managed service account #--------------------------------------- enable_emr_on_eks = true - emr_on_eks_teams = { data_team_a = { emr_on_eks_namespace = "emr-data-team-a" @@ -118,52 +85,84 @@ module "eks_blueprints" { emr_on_eks_iam_role_name = "emr-eks-data-team-b" } } - # Enable Amazon Prometheus - Creates a new Workspace id - enable_amazon_prometheus = true + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" eks_cluster_id = module.eks_blueprints.eks_cluster_id - #K8s Add-ons + + # Add-ons enable_metrics_server = true enable_cluster_autoscaler = true - #--------------------------------------- - # PROMETHEUS and Amazon Prometheus Config - #--------------------------------------- - # Amazon Prometheus Configuration to integrate with Prometheus Server Add-on enable_amazon_prometheus = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint - # Enabling Prometheus Server Add-on enable_prometheus = true - # Optional Map value prometheus_helm_config = { - name = "prometheus" # (Required) Release name. - repository = "https://prometheus-community.github.io/helm-charts" # (Optional) Repository URL where to locate the requested chart. - chart = "prometheus" # (Required) Chart name to be installed. + name = "prometheus" + repository = "https://prometheus-community.github.io/helm-charts" + chart = "prometheus" version = "15.3.0" - # (Optional) Specify the exact chart version to install. - namespace = "prometheus" # (Optional) The namespace to install the release into. + namespace = "prometheus" values = [templatefile("${path.module}/helm_values/prometheus-values.yaml", { operating_system = "linux" })] } - #--------------------------------------- - # Vertical Pod Autoscaling - #--------------------------------------- - enable_vpa = true + enable_vpa = true vpa_helm_config = { - name = "vpa" # (Required) Release name. - repository = "https://charts.fairwinds.com/stable" # (Optional) Repository URL where to locate the requested chart. - chart = "vpa" # (Required) Chart name to be installed. - version = "1.0.0" # (Optional) Specify the exact chart version to install - namespace = "vpa" # (Optional) The namespace to install the release into. 
+ name = "vpa" + repository = "https://charts.fairwinds.com/stable" + chart = "vpa" + version = "1.0.0" + namespace = "vpa" values = [templatefile("${path.module}/helm_values/vpa-values.yaml", {})] } + tags = local.tags + depends_on = [module.eks_blueprints.managed_node_groups] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/analytics/emr-on-eks/variables.tf b/examples/analytics/emr-on-eks/variables.tf index b9ffe515bd..e69de29bb2 100644 --- a/examples/analytics/emr-on-eks/variables.tf +++ b/examples/analytics/emr-on-eks/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." 
- default = "test" -} diff --git a/examples/analytics/emr-on-eks/versions.tf b/examples/analytics/emr-on-eks/versions.tf index df9b71a0f0..aa42f3c7b8 100644 --- a/examples/analytics/emr-on-eks/versions.tf +++ b/examples/analytics/emr-on-eks/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/emr-on-eks/terraform.tfstate" + # } } diff --git a/examples/analytics/spark-k8s-operator/main.tf b/examples/analytics/spark-k8s-operator/main.tf index 1702618bbb..82ca78912f 100644 --- a/examples/analytics/spark-k8s-operator/main.tf +++ b/examples/analytics/spark-k8s-operator/main.tf @@ -31,64 +31,30 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + #----------------------------------------------------------------------------------------------------------# # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks). # Upstream module implemented Security groups based on the best practices doc https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html. 
@@ -105,7 +71,7 @@ module "eks_blueprints" { type = "ingress" self = true } - #Recommended outbound traffic for Node groups + # Recommended outbound traffic for Node groups egress_all = { description = "Node all egress" protocol = "-1" @@ -128,55 +94,45 @@ module "eks_blueprints" { } } - # EKS MANAGED NODE GROUPS managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" instance_types = ["m5.xlarge"] min_size = 3 - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } - # Enable Amazon Prometheus - Creates a new Workspace id enable_amazon_prometheus = true + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" + source = "../../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id - #K8s Add-ons + # Add-ons enable_metrics_server = true enable_cluster_autoscaler = true - #--------------------------------------- - # PROMETHEUS and Amazon Prometheus Config - #--------------------------------------- - # Amazon Prometheus Configuration to integrate with Prometheus Server Add-on enable_amazon_prometheus = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint - #--------------------------------------- - # COMMUNITY PROMETHEUS ENABLE - #--------------------------------------- enable_prometheus = true - # Optional Map value prometheus_helm_config = { - name = "prometheus" # (Required) Release name. - repository = "https://prometheus-community.github.io/helm-charts" # (Optional) Repository URL where to locate the requested chart. - chart = "prometheus" # (Required) Chart name to be installed. - version = "15.3.0" # (Optional) Specify the exact chart version to install. - namespace = "prometheus" # (Optional) The namespace to install the release into. + name = "prometheus" + repository = "https://prometheus-community.github.io/helm-charts" + chart = "prometheus" + version = "15.3.0" + namespace = "prometheus" values = [templatefile("${path.module}/helm_values/prometheus-values.yaml", { operating_system = "linux" })] } - #--------------------------------------- - # ENABLE SPARK on K8S OPERATOR - #--------------------------------------- + enable_spark_k8s_operator = true - # Optional Map value spark_k8s_operator_helm_config = { name = "spark-operator" chart = "spark-operator" @@ -187,17 +143,56 @@ module "eks_blueprints_kubernetes_addons" { create_namespace = true values = [templatefile("${path.module}/helm_values/spark-k8s-operator-values.yaml", {})] } - #--------------------------------------- - # Apache YuniKorn K8s Spark Scheduler - #--------------------------------------- + enable_yunikorn = true yunikorn_helm_config = { - name = "yunikorn" # (Required) Release name. - repository = "https://apache.github.io/yunikorn-release" # (Optional) Repository URL where to locate the requested chart. - chart = "yunikorn" # (Required) Chart name to be installed. - version = "0.12.2" # (Optional) Specify the exact chart version to install. 
+ name = "yunikorn" + repository = "https://apache.github.io/yunikorn-release" + chart = "yunikorn" + version = "0.12.2" values = [templatefile("${path.module}/helm_values/yunikorn-values.yaml", {})] } + tags = local.tags + depends_on = [module.eks_blueprints.managed_node_groups] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/analytics/spark-k8s-operator/variables.tf b/examples/analytics/spark-k8s-operator/variables.tf index f9745d19b4..e69de29bb2 100644 --- a/examples/analytics/spark-k8s-operator/variables.tf +++ b/examples/analytics/spark-k8s-operator/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." 
- default = "spark" -} diff --git a/examples/analytics/spark-k8s-operator/versions.tf b/examples/analytics/spark-k8s-operator/versions.tf index df9b71a0f0..6683962305 100644 --- a/examples/analytics/spark-k8s-operator/versions.tf +++ b/examples/analytics/spark-k8s-operator/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/spark-k8s-operator/terraform.tfstate" + # } } diff --git a/examples/aws-efs-csi-driver/main.tf b/examples/aws-efs-csi-driver/main.tf index f390a17257..747f83dc95 100644 --- a/examples/aws-efs-csi-driver/main.tf +++ b/examples/aws-efs-csi-driver/main.tf @@ -31,75 +31,39 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # EKS MANAGED NODE GROUPS + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - min_size = "2" - subnet_ids = module.aws_vpc.private_subnets + instance_types = ["m5.large"] + min_size = 2 + subnet_ids = module.vpc.private_subnets } } - # FARGATE fargate_profiles = { default = { fargate_profile_name = "default" @@ -112,59 +76,102 @@ module "eks_blueprints" { env = "fargate" } }] - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets additional_tags = { ExtraTag = "Fargate" } } } + + tags = local.tags } -#--------------------------------------------- -# Deploy Kubernetes Add-ons with sub module -#--------------------------------------------- module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + source = "../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id # EKS Managed Add-ons enable_amazon_eks_coredns = true enable_amazon_eks_kube_proxy = true - # K8s Add-ons + # Add-ons enable_aws_load_balancer_controller = true enable_metrics_server = true enable_cluster_autoscaler = true enable_aws_efs_csi_driver = true + tags = local.tags + depends_on = [module.eks_blueprints.managed_node_groups] } -#-------------- -# Deploy EFS -#-------------- +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} + resource "aws_efs_file_system" "efs" { creation_token = "efs" encrypted = true + + tags = local.tags } resource "aws_efs_mount_target" "efs_mt" { - count = length(module.aws_vpc.private_subnets) + count = length(module.vpc.private_subnets) + file_system_id = aws_efs_file_system.efs.id - subnet_id = module.aws_vpc.private_subnets[count.index] - security_groups = [aws_security_group.efs_sg.id] + subnet_id = module.vpc.private_subnets[count.index] + security_groups = [aws_security_group.efs.id] } -resource "aws_security_group" "efs_sg" { - name = "efs-sg" +resource "aws_security_group" "efs" { + name = "${local.name}-efs" description = "Allow 
inbound NFS traffic from private subnets of the VPC" - vpc_id = module.aws_vpc.vpc_id + vpc_id = module.vpc.vpc_id ingress { description = "Allow NFS 2049/tcp" - cidr_blocks = module.aws_vpc.private_subnets_cidr_blocks + cidr_blocks = module.vpc.private_subnets_cidr_blocks from_port = 2049 to_port = 2049 protocol = "tcp" } + + tags = local.tags } diff --git a/examples/aws-efs-csi-driver/variables.tf b/examples/aws-efs-csi-driver/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/aws-efs-csi-driver/variables.tf +++ b/examples/aws-efs-csi-driver/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} diff --git a/examples/aws-efs-csi-driver/versions.tf b/examples/aws-efs-csi-driver/versions.tf index df9b71a0f0..63f8519de9 100644 --- a/examples/aws-efs-csi-driver/versions.tf +++ b/examples/aws-efs-csi-driver/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/aws-efs-csi-driver/terraform.tfstate" + # } } diff --git a/examples/ci-cd/gitlab-ci-cd/README.md b/examples/ci-cd/gitlab-ci-cd/README.md index 97fde07058..36ef6653d2 100644 --- a/examples/ci-cd/gitlab-ci-cd/README.md +++ b/examples/ci-cd/gitlab-ci-cd/README.md @@ -28,9 +28,8 @@ git@github.com:aws-ia/terraform-aws-eks-blueprints.git ## Step 4: Update variables in input.tfvars file -1. Update tenant,environment,zone as per your requirement -2. Update cluster_version to any version > "1.20" -3. Update CIDR of your VPC, vpc_cidcr = "10.2.0.0/16" +1. Update cluster_version to any version > "1.20" +2. Update the CIDR of your VPC, vpc_cidr = "10.2.0.0/16" ## Step 5: Commit changes and push to verify the pipeline diff --git a/examples/ci-cd/gitlab-ci-cd/main.tf b/examples/ci-cd/gitlab-ci-cd/main.tf index ec3527fb36..f22b3131c2 100644 --- a/examples/ci-cd/gitlab-ci-cd/main.tf +++ b/examples/ci-cd/gitlab-ci-cd/main.tf @@ -35,67 +35,77 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - vpc_name = join("-", [var.tenant, var.environment, var.zone, "vpc"]) - cluster_name = join("-", [var.tenant, var.environment, var.zone, "eks"]) - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.2.0.0/16" + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.1.3" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" + } +} + +#--------------------------------------------------------------- +# EKS Blueprints +#--------------------------------------------------------------- +module "eks_blueprints" { + source = "../../.." 
+ + cluster_name = local.name + cluster_version = "1.21" + + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + + managed_node_groups = { + mg_5 = { + node_group_name = "managed-ondemand" + instance_types = ["m5.large"] + subnet_ids = module.vpc.private_subnets + max_size = 10 + } + } + + tags = local.tags } -module "aws_vpc" { +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { source = "terraform-aws-modules/vpc/aws" version = "~> 3.0" - name = local.vpc_name + name = local.name cidr = local.vpc_cidr - azs = data.aws_availability_zones.available.names - public_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in slice(data.aws_availability_zones.available.names, 0, 3) : cidrsubnet(local.vpc_cidr, 8, k + 10)] + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 } -} - -#--------------------------------------------------------------- -# Example to consume eks_blueprints module -#--------------------------------------------------------------- -module "eks_blueprints" { - source = "../../.." - tenant = var.tenant - environment = var.environment - zone = var.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES - cluster_version = "1.21" - - # EKS MANAGED NODE GROUPS - managed_node_groups = { - mg_4 = { - node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - subnet_ids = module.aws_vpc.private_subnets - max_size = "10" - } - } + tags = local.tags } diff --git a/examples/ci-cd/gitlab-ci-cd/variables.tf b/examples/ci-cd/gitlab-ci-cd/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/ci-cd/gitlab-ci-cd/variables.tf +++ b/examples/ci-cd/gitlab-ci-cd/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." 
- default = "dev" -} diff --git a/examples/complete-kubernetes-addons/main.tf b/examples/complete-kubernetes-addons/main.tf index df5f946cf5..7a152b0f42 100644 --- a/examples/complete-kubernetes-addons/main.tf +++ b/examples/complete-kubernetes-addons/main.tf @@ -30,73 +30,32 @@ provider "helm" { data "aws_availability_zones" "available" {} -data "aws_ami" "eks" { - owners = ["amazon"] - most_recent = true - - filter { - name = "name" - values = ["amazon-eks-node-${local.cluster_version}-*"] - } -} - locals { - tenant = var.tenant - environment = var.environment - zone = var.zone - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - cluster_version = "1.21" - - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - cidr = local.vpc_cidr - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + cluster_name = local.name + cluster_version = "1.21" - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - cluster_version = local.cluster_version + enable_amazon_prometheus = true #----------------------------------------------------------------------------------------------------------# # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks). 
@@ -114,7 +73,7 @@ module "eks_blueprints" { type = "ingress" self = true } - #Recommended outbound traffic for Node groups + # Recommended outbound traffic for Node groups egress_all = { description = "Node all egress" protocol = "-1" @@ -138,21 +97,21 @@ module "eks_blueprints" { } managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" instance_types = ["m5.large"] - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets force_update_version = true } } self_managed_node_groups = { - self_mg_4 = { + self_mg_5 = { node_group_name = "self-managed-ondemand" instance_type = "m5.large" launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows custom_ami_id = data.aws_ami.eks.id # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc. - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } @@ -168,22 +127,21 @@ module "eks_blueprints" { env = "fargate" } }] - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets additional_tags = { ExtraTag = "Fargate" } - }, + } } - # AWS Managed Services - enable_amazon_prometheus = true + tags = local.tags } data "aws_eks_addon_version" "latest" { for_each = toset(["vpc-cni", "coredns"]) addon_name = each.value - kubernetes_version = local.cluster_version + kubernetes_version = module.eks_blueprints.eks_cluster_version most_recent = true } @@ -191,7 +149,7 @@ data "aws_eks_addon_version" "default" { for_each = toset(["kube-proxy"]) addon_name = each.value - kubernetes_version = local.cluster_version + kubernetes_version = module.eks_blueprints.eks_cluster_version most_recent = false } @@ -202,6 +160,7 @@ module "eks_blueprints_kubernetes_addons" { eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id auto_scaling_group_names = module.eks_blueprints.self_managed_node_group_autoscaling_groups + # EKS Addons enable_amazon_eks_vpc_cni = true amazon_eks_vpc_cni_config = { addon_version = data.aws_eks_addon_version.latest["vpc-cni"].version @@ -222,7 +181,6 @@ module "eks_blueprints_kubernetes_addons" { enable_amazon_eks_aws_ebs_csi_driver = true - # Prometheus and Amazon Managed Prometheus integration enable_prometheus = true enable_amazon_prometheus = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint @@ -232,7 +190,7 @@ module "eks_blueprints_kubernetes_addons" { name = "aws-for-fluent-bit" chart = "aws-for-fluent-bit" repository = "https://aws.github.io/eks-charts" - version = "0.1.0" + version = "0.1.16" namespace = "logging" aws_for_fluent_bit_cw_log_group = "/${module.eks_blueprints.eks_cluster_id}/worker-fluentbit-logs" # Optional aws_for_fluentbit_cwlog_retention_in_days = 90 @@ -283,5 +241,55 @@ module "eks_blueprints_kubernetes_addons" { EOF } + tags = local.tags + depends_on = [module.eks_blueprints.managed_node_groups] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + 
manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} + +data "aws_ami" "eks" { + most_recent = true + filter { + name = "name" + values = ["amazon-eks-node-${module.eks_blueprints.eks_cluster_version}-*"] + } + owners = ["amazon"] +} diff --git a/examples/complete-kubernetes-addons/variables.tf b/examples/complete-kubernetes-addons/variables.tf index 77db17caf4..e69de29bb2 100644 --- a/examples/complete-kubernetes-addons/variables.tf +++ b/examples/complete-kubernetes-addons/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "test" -} diff --git a/examples/complete-kubernetes-addons/versions.tf b/examples/complete-kubernetes-addons/versions.tf index c357c21aa2..7aa221989a 100644 --- a/examples/complete-kubernetes-addons/versions.tf +++ b/examples/complete-kubernetes-addons/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/complete-kubernetes-addons/terraform.tfstate" + # } } diff --git a/examples/crossplane/main.tf b/examples/crossplane/main.tf index fb55af0c1a..897643d5b7 100644 --- a/examples/crossplane/main.tf +++ b/examples/crossplane/main.tf @@ -45,80 +45,47 @@ provider "kubectl" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - 
"kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # EKS MANAGED NODE GROUPS + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - min_size = "2" - subnet_ids = module.aws_vpc.private_subnets + instance_types = ["m5.large"] + min_size = 2 + subnet_ids = module.vpc.private_subnets } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + source = "../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id - # Refer to docs/add-ons/crossplane.md for advanced configuration enable_crossplane = true # You can choose to install either of crossplane_aws_provider or crossplane_jet_aws_provider to work with AWS @@ -139,4 +106,45 @@ module "eks_blueprints_kubernetes_addons" { # This example config uses AmazonS3FullAccess for demo purpose only, but you should select a policy with the minimum permissions required to provision your resources. additional_irsa_policies = ["arn:aws:iam::aws:policy/AmazonS3FullAccess"] } + + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags } diff --git a/examples/crossplane/variables.tf b/examples/crossplane/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/crossplane/variables.tf +++ b/examples/crossplane/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. 
prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} diff --git a/examples/crossplane/versions.tf b/examples/crossplane/versions.tf index 2a74218fe2..7185c9ba61 100644 --- a/examples/crossplane/versions.tf +++ b/examples/crossplane/versions.tf @@ -20,7 +20,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/crossplane/terraform.tfstate" + # } } diff --git a/examples/eks-cluster-with-external-dns/main.tf b/examples/eks-cluster-with-external-dns/main.tf index 99df77d934..811ef4c11a 100644 --- a/examples/eks-cluster-with-external-dns/main.tf +++ b/examples/eks-cluster-with-external-dns/main.tf @@ -28,102 +28,56 @@ provider "helm" { } } -data "aws_availability_zones" "available" {} - data "aws_acm_certificate" "issued" { domain = var.acm_certificate_domain statuses = ["ISSUED"] } -locals { - tenant = "aws001" # AWS account name or unique id for tenant - environment = "preprod" # Environment area eg., preprod or prod - zone = "dev" # Environment with in one sub_tenant or business unit - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - terraform_version = "Terraform v1.0.1" -} - -#--------------------------------------------------------------- -# VPC -#--------------------------------------------------------------- - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] +data "aws_availability_zones" "available" {} - create_igw = true - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true +locals { + name = basename(path.cwd) + region = "us-west-2" - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_cluster module +# EKS Blueprints #--------------------------------------------------------------- - module "eks_blueprints" { source = "../.." 
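#---------------------------------------------------------------
# NOTE: a minimal sketch, not part of this patch, of the naming change the
# hunk below applies: the removed tenant/environment/zone inputs produced a
# joined cluster name, while the refactor derives one name from the example
# directory. The values shown are illustrative.
#---------------------------------------------------------------
locals {
  example_old_name = join("-", ["aws001", "preprod", "dev", "eks"]) # "aws001-preprod-dev-eks"
  example_new_name = basename(path.cwd)                             # e.g. "eks-cluster-with-external-dns"
}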
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # Managed Node Group + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - min_size = "2" - subnet_ids = module.aws_vpc.private_subnets + instance_types = ["m5.large"] + min_size = 2 + subnet_ids = module.vpc.private_subnets } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - #--------------------------------------------------------------- - # Globals - #--------------------------------------------------------------- - eks_cluster_id = module.eks_blueprints.eks_cluster_id eks_cluster_domain = var.eks_cluster_domain - #--------------------------------------------------------------- - # ARGO CD ADD-ON - #--------------------------------------------------------------- - enable_argocd = true argocd_applications = { workloads = { @@ -139,10 +93,6 @@ module "eks_blueprints_kubernetes_addons" { } } - #--------------------------------------------------------------- - # INGRESS NGINX ADD-ON - #--------------------------------------------------------------- - enable_ingress_nginx = true ingress_nginx_helm_config = { values = [templatefile("${path.module}/nginx-values.yaml", { @@ -151,15 +101,52 @@ module "eks_blueprints_kubernetes_addons" { })] } - #--------------------------------------------------------------- - # OTHER ADD-ONS - #--------------------------------------------------------------- - enable_aws_load_balancer_controller = true enable_external_dns = true + tags = local.tags + depends_on = [ - module.aws_vpc, + module.vpc, module.eks_blueprints.managed_node_groups ] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/eks-cluster-with-external-dns/variables.tf b/examples/eks-cluster-with-external-dns/variables.tf index 29ec1c0aaa..7e19f8d330 100644 --- a/examples/eks-cluster-with-external-dns/variables.tf +++ b/examples/eks-cluster-with-external-dns/variables.tf @@ -6,5 +6,6 @@ variable "eks_cluster_domain" { variable "acm_certificate_domain" 
{ type = string - description = "*.example.com" + description = "Route53 certificate domain" + default = "*.example.com" } diff --git a/examples/eks-cluster-with-external-dns/versions.tf b/examples/eks-cluster-with-external-dns/versions.tf index df9b71a0f0..755f5d6933 100644 --- a/examples/eks-cluster-with-external-dns/versions.tf +++ b/examples/eks-cluster-with-external-dns/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/eks-cluster-with-external-dns/terraform.tfstate" + # } } diff --git a/examples/eks-cluster-with-new-vpc/main.tf b/examples/eks-cluster-with-new-vpc/main.tf index 2d92699ef6..149d683e63 100644 --- a/examples/eks-cluster-with-new-vpc/main.tf +++ b/examples/eks-cluster-with-new-vpc/main.tf @@ -31,73 +31,45 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs + name = basename(path.cwd) + region = "us-west-2" - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } + #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../.." 
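#---------------------------------------------------------------
# NOTE: a minimal sketch, not part of this patch, of enabling the S3 backend
# that the versions.tf changes above leave commented out; the bucket name and
# key are illustrative and assume a state bucket created out of band.
#---------------------------------------------------------------
terraform {
  backend "s3" {
    bucket = "my-terraform-state-bucket" # assumption: pre-created bucket
    region = "us-west-2"
    key    = "eks-cluster-with-new-vpc/terraform.tfstate"
  }
}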
- tenant = local.tenant - environment = local.environment - zone = local.zone - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # EKS MANAGED NODE GROUPS + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" instance_types = ["m5.large"] - min_size = "2" - subnet_ids = module.aws_vpc.private_subnets + min_size = 2 + subnet_ids = module.vpc.private_subnets } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + source = "../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id # EKS Managed Add-ons @@ -105,11 +77,52 @@ module "eks_blueprints_kubernetes_addons" { enable_amazon_eks_coredns = true enable_amazon_eks_kube_proxy = true - #K8s Add-ons + # Add-ons enable_aws_load_balancer_controller = true enable_metrics_server = true enable_cluster_autoscaler = true enable_aws_cloudwatch_metrics = true + tags = local.tags + depends_on = [module.eks_blueprints.managed_node_groups] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/eks-cluster-with-new-vpc/outputs.tf b/examples/eks-cluster-with-new-vpc/outputs.tf index 49503255b7..aaed441953 100644 --- a/examples/eks-cluster-with-new-vpc/outputs.tf +++ b/examples/eks-cluster-with-new-vpc/outputs.tf @@ -1,16 +1,16 @@ output "vpc_private_subnet_cidr" { description = "VPC private subnet CIDR" - value = module.aws_vpc.private_subnets_cidr_blocks + value = module.vpc.private_subnets_cidr_blocks } output "vpc_public_subnet_cidr" { description = "VPC public subnet CIDR" - value = module.aws_vpc.public_subnets_cidr_blocks + value = module.vpc.public_subnets_cidr_blocks } output "vpc_cidr" { description = "VPC CIDR" - value = module.aws_vpc.vpc_cidr_block + value = module.vpc.vpc_cidr_block } output "eks_cluster_id" { diff --git a/examples/eks-cluster-with-new-vpc/variables.tf b/examples/eks-cluster-with-new-vpc/variables.tf index adb3fd6e26..2800300253 100644 --- a/examples/eks-cluster-with-new-vpc/variables.tf +++ b/examples/eks-cluster-with-new-vpc/variables.tf @@ -1,17 +1,6 @@ -variable "tenant" { +# tflint-ignore: terraform_unused_declarations +variable "cluster_name" { + 
description = "Name of cluster - used by Terratest for e2e test automation" type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" + default = "" } diff --git a/examples/eks-cluster-with-new-vpc/versions.tf b/examples/eks-cluster-with-new-vpc/versions.tf index df9b71a0f0..a967321a96 100644 --- a/examples/eks-cluster-with-new-vpc/versions.tf +++ b/examples/eks-cluster-with-new-vpc/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/eks-cluster-with-new-vpc/terraform.tfstate" + # } } diff --git a/examples/fully-private-eks-cluster/main.tf b/examples/fully-private-eks-cluster/main.tf index bd7f0c62f7..13a96f97d2 100644 --- a/examples/fully-private-eks-cluster/main.tf +++ b/examples/fully-private-eks-cluster/main.tf @@ -17,170 +17,133 @@ provider "kubernetes" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" + } } -module "aws_vpc" { +#--------------------------------------------------------------- +# EKS Blueprints +#--------------------------------------------------------------- +module "eks_blueprints" { + source = "../.." + + cluster_name = local.name + cluster_version = "1.21" + + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + + # Step 1. Set cluster API endpoint both private and public + cluster_endpoint_public_access = true + cluster_endpoint_private_access = true + + # Step 2. Change cluster endpoint to private only, comment out the above lines and uncomment the below lines. 
+ # cluster_endpoint_public_access = false + # cluster_endpoint_private_access = true + + managed_node_groups = { + mg_5 = { + node_group_name = "managed-ondemand" + instance_types = ["m5.large"] + subnet_ids = module.vpc.private_subnets + } + } + + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { source = "terraform-aws-modules/vpc/aws" version = "~> 3.0" - name = local.vpc_name + name = local.name cidr = local.vpc_cidr - azs = local.azs - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] enable_dns_hostnames = true - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 } - manage_default_security_group = true - default_security_group_name = "${local.vpc_name}-endpoint-secgrp" - default_security_group_ingress = [ - { - protocol = -1 - from_port = 0 - to_port = 0 - cidr_blocks = local.vpc_cidr - }] - default_security_group_egress = [ - { - from_port = 0 - to_port = 0 - protocol = -1 - cidr_blocks = "0.0.0.0/0" - }] + tags = local.tags } -module "vpc_endpoint_gateway" { - source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints" - version = "v3.2.0" +module "vpc_endpoints_sg" { + source = "terraform-aws-modules/security-group/aws" + version = "~> 4.0" - create = true - vpc_id = module.aws_vpc.vpc_id + name = "${local.name}-vpc-endpoints" + description = "Security group for VPC endpoint access" + vpc_id = module.vpc.vpc_id - endpoints = { - s3 = { - service = "s3" - service_type = "Gateway" - route_table_ids = flatten([ - module.aws_vpc.intra_route_table_ids, - module.aws_vpc.private_route_table_ids]) - tags = { Name = "s3-vpc-Gateway" } + ingress_with_cidr_blocks = [ + { + rule = "https-443-tcp" + description = "VPC CIDR HTTPS" + cidr_blocks = join(",", module.vpc.private_subnets_cidr_blocks) }, - } -} + ] -data "aws_security_group" "default" { - name = "default" - vpc_id = module.aws_vpc.vpc_id + egress_with_cidr_blocks = [ + { + rule = "https-443-tcp" + description = "All egress HTTPS" + cidr_blocks = "0.0.0.0/0" + }, + ] + + tags = local.tags } module "vpc_endpoints" { source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints" - version = "v3.2.0" - create = true - vpc_id = module.aws_vpc.vpc_id - security_group_ids = [ - data.aws_security_group.default.id] - subnet_ids = module.aws_vpc.private_subnets - - endpoints = { - ssm = { - service = "ssm" - private_dns_enabled = true - }, - logs = { - service = "logs" - private_dns_enabled = true - }, - autoscaling = { - service = "autoscaling" - private_dns_enabled = true - }, - sts = { - service = "sts" - private_dns_enabled = true - }, - elasticloadbalancing = { - service = "elasticloadbalancing" - private_dns_enabled = true - }, - ec2 = { - service = "ec2" 
- private_dns_enabled = true - }, - ec2messages = { - service = "ec2messages" - private_dns_enabled = true - }, - ecr_api = { - service = "ecr.api" - private_dns_enabled = true - }, - ecr_dkr = { - service = "ecr.dkr" - private_dns_enabled = true - }, - kms = { - service = "kms" - private_dns_enabled = true - } - } - tags = { - Project = "EKS" - Endpoint = "true" - } -} -#--------------------------------------------------------------- -# Example to consume eks_blueprints module -#--------------------------------------------------------------- -module "eks_blueprints" { - source = "../.." - - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES - cluster_version = "1.21" - - # Step 1. Set cluster API endpoint both private and public - cluster_endpoint_public_access = true - cluster_endpoint_private_access = true + version = "~> 3.0" - # Step 2. Change cluster endpoint to private only, comment out the above lines and uncomment the below lines. - # cluster_endpoint_public_access = false - # cluster_endpoint_private_access = true + vpc_id = module.vpc.vpc_id + security_group_ids = [module.vpc_endpoints_sg.security_group_id] - # EKS MANAGED NODE GROUPS - managed_node_groups = { - mg_4 = { - node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - subnet_ids = module.aws_vpc.private_subnets + endpoints = merge({ + s3 = { + service = "s3" + service_type = "Gateway" + route_table_ids = module.vpc.private_route_table_ids + tags = { + Name = "${local.name}-s3" + } } - } + }, + { for service in toset(["autoscaling", "ecr.api", "ecr.dkr", "ec2", "ec2messages", "elasticloadbalancing", "sts", "kms", "logs", "ssm", "ssmmessages"]) : + replace(service, ".", "_") => + { + service = service + subnet_ids = module.vpc.private_subnets + private_dns_enabled = true + tags = { Name = "${local.name}-${service}" } + } + }) + + tags = local.tags } diff --git a/examples/fully-private-eks-cluster/variables.tf b/examples/fully-private-eks-cluster/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/fully-private-eks-cluster/variables.tf +++ b/examples/fully-private-eks-cluster/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." 
- default = "dev" -} diff --git a/examples/fully-private-eks-cluster/versions.tf b/examples/fully-private-eks-cluster/versions.tf index df9b71a0f0..479c94ff3d 100644 --- a/examples/fully-private-eks-cluster/versions.tf +++ b/examples/fully-private-eks-cluster/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/fully-private-eks-cluster/terraform.tfstate" + # } } diff --git a/examples/game-tech/agones-game-controller/main.tf b/examples/game-tech/agones-game-controller/main.tf index 5a946146ca..46609055dd 100644 --- a/examples/game-tech/agones-game-controller/main.tf +++ b/examples/game-tech/agones-game-controller/main.tf @@ -31,92 +31,52 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
-  tenant            = local.tenant
-  environment       = local.environment
-  zone              = local.zone
-  terraform_version = local.terraform_version
-
-  # EKS Cluster VPC and Subnet mandatory config
-  vpc_id             = module.aws_vpc.vpc_id
-  private_subnet_ids = module.aws_vpc.private_subnets
-
-  # EKS CONTROL PLANE VARIABLES
+  cluster_name    = local.name
   cluster_version = "1.21"

-  # EKS MANAGED NODE GROUPS
+  vpc_id             = module.vpc.vpc_id
+  private_subnet_ids = module.vpc.private_subnets
+
   managed_node_groups = {
-    mg_4 = {
-      # 1> Node Group configuration - Part1
-      node_group_name        = "managed-ondemand" # Max 40 characters for node group name
-      create_launch_template = true               # false will use the default launch template
-      launch_template_os     = "amazonlinux2eks"  # amazonlinux2eks or bottlerocket
-      public_ip              = true               # Use this to enable public IP for EC2 instances; only for public subnets used in launch templates ;
+    mg_5 = {
+      node_group_name        = "managed-ondemand"
+      create_launch_template = true
+      launch_template_os     = "amazonlinux2eks"
+      public_ip              = true
       pre_userdata           = <<-EOT
-      yum install -y amazon-ssm-agent
-      systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent"
-      EOT
-      # 2> Node Group scaling configuration
+        yum install -y amazon-ssm-agent
+        systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent
+      EOT
+
       desired_size = 3
       max_size     = 12
       min_size     = 3
-      max_unavailable = 1 # or percentage = 20
+      max_unavailable = 1

-      # 3> Node Group compute configuration
-      ami_type       = "AL2_x86_64"  # AL2_x86_64, AL2_x86_64_GPU, AL2_ARM_64, CUSTOM
-      capacity_type  = "ON_DEMAND"   # ON_DEMAND or SPOT
-      instance_types = ["m4.large"]  # List of instances used only for SPOT type
+      ami_type       = "AL2_x86_64"
+      capacity_type  = "ON_DEMAND"
+      instance_types = ["m5.large"]
       disk_size      = 50

-      # 4> Node Group network configuration
-      subnet_ids = module.aws_vpc.public_subnets
-
-      k8s_taints = []
+      subnet_ids = module.vpc.public_subnets

       k8s_labels = {
         Environment = "preprod"
@@ -128,8 +88,10 @@ module "eks_blueprints" {
         Name        = "m5x-on-demand"
         subnet_type = "public"
       }
-    },
+    }
   }
+
+  tags = local.tags
 }

 module "eks_blueprints_kubernetes_addons" {
@@ -138,22 +100,19 @@ module "eks_blueprints_kubernetes_addons" {
   eks_cluster_id               = module.eks_blueprints.eks_cluster_id
   eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id

-  #K8s Add-ons
+  # Add-ons
   enable_metrics_server     = true
   enable_cluster_autoscaler = true

-  #---------------------------------------
-  # ENABLE AGONES
-  #---------------------------------------
   # NOTE: Agones requires a Node group in Public Subnets and enable Public IP
   enable_agones = true
-  # Optional  agones_helm_chart
   agones_helm_config = {
     name       = "agones"
     chart      = "agones"
     repository = "https://agones.dev/chart/stable"
     version    = "1.21.0"
-    namespace  = "agones-system" # Agones recommends to install in it's own namespace such as `agones-system` as shown here. You can specify any namespace other than `kube-system`
+    namespace  = "agones-system"
+
     values = [templatefile("${path.module}/helm_values/agones-values.yaml", {
       expose_udp            = true
       gameserver_namespaces = "{${join(",", ["default", "xbox-gameservers", "xbox-gameservers"])}}"
@@ -162,5 +121,46 @@
     })]
   }

+  tags = local.tags
+
   depends_on = [module.eks_blueprints.managed_node_groups]
 }
+
+#---------------------------------------------------------------
+# Supporting Resources
+#---------------------------------------------------------------
+module "vpc" {
+  source  = "terraform-aws-modules/vpc/aws"
+  version = "~> 3.0"
+
+  name = local.name
+  cidr = local.vpc_cidr
+
+  azs             = local.azs
+  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)]
+  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)]
+
+  enable_nat_gateway   = true
+  single_nat_gateway   = true
+  enable_dns_hostnames = true
+
+  # Manage so we can name
+  manage_default_network_acl    = true
+  default_network_acl_tags      = { Name = "${local.name}-default" }
+  manage_default_route_table    = true
+  default_route_table_tags      = { Name = "${local.name}-default" }
+  manage_default_security_group = true
+  default_security_group_tags   = { Name = "${local.name}-default" }
+
+  public_subnet_tags = {
+    "kubernetes.io/cluster/${local.name}" = "shared"
+    "kubernetes.io/role/elb"              = 1
+  }
+
+  private_subnet_tags = {
+    "kubernetes.io/cluster/${local.name}" = "shared"
+    "kubernetes.io/role/internal-elb"     = 1
+  }
+
+  tags = local.tags
+}
diff --git a/examples/game-tech/agones-game-controller/variables.tf b/examples/game-tech/agones-game-controller/variables.tf
index adb3fd6e26..e69de29bb2 100644
--- a/examples/game-tech/agones-game-controller/variables.tf
+++ b/examples/game-tech/agones-game-controller/variables.tf
@@ -1,17 +0,0 @@
-variable "tenant" {
-  type        = string
-  description = "Account Name or unique account unique id e.g., apps or management or aws007"
-  default     = "aws001"
-}
-
-variable "environment" {
-  type        = string
-  default     = "preprod"
-  description = "Environment area, e.g. prod or preprod "
-}
-
-variable "zone" {
-  type        = string
-  description = "zone, e.g. dev or qa or load or ops etc..."
- default = "dev" -} diff --git a/examples/game-tech/agones-game-controller/versions.tf b/examples/game-tech/agones-game-controller/versions.tf index df9b71a0f0..8289aa05fb 100644 --- a/examples/game-tech/agones-game-controller/versions.tf +++ b/examples/game-tech/agones-game-controller/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/agones-game-controller/terraform.tfstate" + # } } diff --git a/examples/gitops/argocd/main.tf b/examples/gitops/argocd/main.tf index e02e2f363c..94520b1ff9 100644 --- a/examples/gitops/argocd/main.tf +++ b/examples/gitops/argocd/main.tf @@ -31,101 +31,43 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - - terraform_version = "Terraform v1.0.1" - - #--------------------------------------------------------------- - # ARGOCD ADD-ON APPLICATION - #--------------------------------------------------------------- - - addon_application = { - path = "chart" - repo_url = "https://github.com/aws-samples/eks-blueprints-add-ons.git" - add_on_application = true - } + name = basename(path.cwd) + region = "us-west-2" - #--------------------------------------------------------------- - # ARGOCD WORKLOAD APPLICATION - #--------------------------------------------------------------- + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - workload_application = { - path = "envs/dev" - repo_url = "https://github.com/aws-samples/eks-blueprints-workloads.git" - add_on_application = false + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# VPC +# EKS Blueprints #--------------------------------------------------------------- - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - create_igw = true - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" - } -} - -#--------------------------------------------------------------- -# Example to consume eks_blueprints module -#--------------------------------------------------------------- - module "eks_blueprints" { - source = "../../../" + source = "../../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # Managed Node Group + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" instance_types = ["m5.large"] - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets - desired_size = "5" - max_size = "10" - min_size = "3" + desired_size = 5 + max_size = 10 + min_size = 3 } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { @@ -133,34 +75,74 @@ module "eks_blueprints_kubernetes_addons" { eks_cluster_id = module.eks_blueprints.eks_cluster_id - #--------------------------------------------------------------- - # ARGO CD ADD-ON - #--------------------------------------------------------------- - enable_argocd = true - argocd_manage_add_ons = true # Indicates that ArgoCD is responsible for managing/deploying Add-ons. + argocd_manage_add_ons = true # Indicates that ArgoCD is responsible for managing/deploying add-ons argocd_applications = { - addons = local.addon_application - workloads = local.workload_application + addons = { + path = "chart" + repo_url = "https://github.com/aws-samples/eks-blueprints-add-ons.git" + add_on_application = true + } + workloads = { + path = "envs/dev" + repo_url = "https://github.com/aws-samples/eks-blueprints-workloads.git" + add_on_application = false + } } - #--------------------------------------------------------------- - # ADD-ONS - #--------------------------------------------------------------- - - enable_aws_for_fluentbit = true - enable_aws_load_balancer_controller = true - enable_cert_manager = true - enable_cluster_autoscaler = true - enable_ingress_nginx = true - enable_karpenter = true - enable_keda = true - enable_metrics_server = true - enable_prometheus = true - enable_traefik = true - enable_vpa = true - enable_yunikorn = true - enable_argo_rollouts = true + # Add-ons + enable_aws_for_fluentbit = true + enable_cert_manager = true + enable_cluster_autoscaler = true + enable_karpenter = true + enable_keda = true + enable_metrics_server = true + enable_prometheus = true + enable_traefik = true + enable_vpa = true + enable_yunikorn = true + enable_argo_rollouts = true + + tags = local.tags depends_on = [module.eks_blueprints.managed_node_groups] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + 
"kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/gitops/argocd/variables.tf b/examples/gitops/argocd/variables.tf index e6bd6caeab..e69de29bb2 100644 --- a/examples/gitops/argocd/variables.tf +++ b/examples/gitops/argocd/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "Zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} diff --git a/examples/gitops/argocd/versions.tf b/examples/gitops/argocd/versions.tf index df9b71a0f0..bb7250475f 100644 --- a/examples/gitops/argocd/versions.tf +++ b/examples/gitops/argocd/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/argocd/terraform.tfstate" + # } } diff --git a/examples/ingress-controllers/nginx/README.md b/examples/ingress-controllers/nginx/README.md index a52f001882..2ea5931d1b 100644 --- a/examples/ingress-controllers/nginx/README.md +++ b/examples/ingress-controllers/nginx/README.md @@ -50,11 +50,9 @@ terraform plan to create resources ```shell script -terraform apply -target="module.aws_vpc" +terraform apply -target="module.vpc" terraform apply -target="module.eks_blueprints" -terraform apply -target="module.eks_blueprints_kubernetes_addons" -terraform apply -target="module.aws_load_balancer_controller" -terraform apply -target="module.ingress_nginx" +terraform apply ``` Enter `yes` for each apply @@ -90,11 +88,11 @@ The following command destroys the resources created by `terraform apply` ```shell script cd examples/ingress-controllers/nginx -terraform destroy -target="module.module.ingress_nginx" -auto-approve -terraform destroy -target="module.module.aws_load_balancer_controller" -auto-approve -terraform destroy -target="module.module.eks-blueprints-kubernetes-addons" -auto-approve -terraform destroy -target="module.module.eks-blueprints" -auto-approve -terraform destroy -target="module.aws_vpc" -auto-approve +terraform destroy -target="module.eks_blueprints_kubernetes_addons.module.ingress_nginx[0]" -auto-approve +terraform destroy -target="module.eks_blueprints_kubernetes_addons.module.aws_load_balancer_controller[0]" -auto-approve +terraform destroy -target="module.eks-blueprints-kubernetes-addons" -auto-approve +terraform destroy -target="module.eks-blueprints" -auto-approve +terraform destroy -auto-approve ``` ## Learn more diff --git a/examples/ingress-controllers/nginx/main.tf b/examples/ingress-controllers/nginx/main.tf index 2ca3cf876c..fd60129210 100644 --- a/examples/ingress-controllers/nginx/main.tf +++ b/examples/ingress-controllers/nginx/main.tf @@ -31,101 +31,100 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = 
"us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } + #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # EKS MANAGED NODE GROUPS + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - min_size = "2" - subnet_ids = module.aws_vpc.private_subnets + instance_types = ["m5.large"] + min_size = 2 + subnet_ids = module.vpc.private_subnets } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" + source = "../../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id # EKS Managed Add-ons enable_amazon_eks_coredns = true enable_amazon_eks_kube_proxy = true - #K8s Add-ons - enable_metrics_server = true - enable_cluster_autoscaler = true -} - -module "aws_load_balancer_controller" { - source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - + # Add-ons + enable_metrics_server = true + enable_cluster_autoscaler = true enable_aws_load_balancer_controller = true -} - -module "ingress_nginx" { - source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id enable_ingress_nginx = true ingress_nginx_helm_config = { version = "4.0.17" values = [templatefile("${path.module}/nginx_values.yaml", {})] } + + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + 
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags } diff --git a/examples/ingress-controllers/nginx/variables.tf b/examples/ingress-controllers/nginx/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/ingress-controllers/nginx/variables.tf +++ b/examples/ingress-controllers/nginx/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} diff --git a/examples/ingress-controllers/nginx/versions.tf b/examples/ingress-controllers/nginx/versions.tf index df9b71a0f0..81f80777ee 100644 --- a/examples/ingress-controllers/nginx/versions.tf +++ b/examples/ingress-controllers/nginx/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/nginx/terraform.tfstate" + # } } diff --git a/examples/ipv6-eks-cluster/main.tf b/examples/ipv6-eks-cluster/main.tf index 4bb1e76165..533132f19f 100644 --- a/examples/ipv6-eks-cluster/main.tf +++ b/examples/ipv6-eks-cluster/main.tf @@ -31,88 +31,48 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = "ipv6" # AWS account name or unique id for tenant - environment = "preprod" # Environment area eg., preprod or prod - zone = "dev" # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - enable_ipv6 = true - assign_ipv6_address_on_creation = true # Assign IPv6 address on subnet, must be disabled to change IPv6 CIDRs. This is the IPv6 equivalent of map_public_ip_on_launch - private_subnet_assign_ipv6_address_on_creation = true # Assign IPv6 address on private subnet, must be disabled to change IPv6 CIDRs. 
This is the IPv6 equivalent of map_public_ip_on_launch - - public_subnet_ipv6_prefixes = [0, 1, 2] # Assigns IPv6 private subnet id based on the Amazon provided /56 prefix base 10 integer (0-256). Must be of equal length to the corresponding IPv4 subnet list - private_subnet_ipv6_prefixes = [3, 4, 5] # Assigns IPv6 public subnet id based on the Amazon provided /56 prefix base 10 integer (0-256). Must be of equal length to the corresponding IPv4 subnet list - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } + #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # IPV6 + cluster_name = local.name + cluster_version = "1.21" cluster_ip_family = "ipv6" - # EKS CONTROL PLANE VARIABLES - cluster_version = "1.21" + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS MANAGED NODE GROUPS managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "mng-ondemand" instance_types = ["m5.large"] - min_size = "2" - desired_size = "2" - max_size = "10" - subnet_ids = module.aws_vpc.private_subnets + min_size = 2 + desired_size = 2 + max_size = 10 + subnet_ids = module.vpc.private_subnets } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + source = "../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id enable_ipv6 = true # Enable Ipv6 network. 
Attaches new VPC CNI policy to the IRSA role @@ -121,8 +81,56 @@ module "eks_blueprints_kubernetes_addons" { enable_amazon_eks_kube_proxy = true enable_amazon_eks_vpc_cni = true - #K8s Add-ons + # Add-ons enable_aws_load_balancer_controller = true + tags = local.tags + depends_on = [module.eks_blueprints.managed_node_groups] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_ipv6 = true + assign_ipv6_address_on_creation = true + create_egress_only_igw = true + + public_subnet_ipv6_prefixes = [0, 1, 2] + private_subnet_ipv6_prefixes = [3, 4, 5] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/ipv6-eks-cluster/versions.tf b/examples/ipv6-eks-cluster/versions.tf index df9b71a0f0..9b2b906baf 100644 --- a/examples/ipv6-eks-cluster/versions.tf +++ b/examples/ipv6-eks-cluster/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/ipv6-eks-cluster/terraform.tfstate" + # } } diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index 0fda1a676d..eec68ba72a 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -44,89 +44,33 @@ provider "kubectl" { data "aws_availability_zones" "available" {} -data "aws_ami" "amazonlinux2eks" { - most_recent = true - filter { - name = "name" - values = [local.amazonlinux2eks] - } - owners = ["amazon"] -} - -data "aws_ami" "bottlerocket" { - most_recent = true - filter { - name = "name" - values = [local.bottlerocket] - } - owners = ["amazon"] -} - locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - cluster_version = "1.21" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) node_group_name = "self-ondemand" - amazonlinux2eks = "amazon-eks-node-${local.cluster_version}-*" - bottlerocket = "bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*" - - terraform_version = "Terraform v1.0.1" -} - 
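#---------------------------------------------------------------
# NOTE: a minimal sketch, not part of this patch, of how the
# "karpenter.sh/discovery/<cluster>" security group tag added below is
# typically consumed: a Karpenter Provisioner selects node subnets and
# security groups by tag. The manifest body is illustrative, not taken
# from this repository.
#---------------------------------------------------------------
resource "kubectl_manifest" "example_karpenter_provisioner" {
  yaml_body = <<-YAML
    apiVersion: karpenter.sh/v1alpha5
    kind: Provisioner
    metadata:
      name: example
    spec:
      provider:
        subnetSelector:
          kubernetes.io/cluster/my-cluster: '*'
        securityGroupSelector:
          karpenter.sh/discovery/my-cluster: my-cluster
      ttlSecondsAfterEmpty: 30
  YAML
}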
-module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + cluster_name = local.name + cluster_version = "1.21" - # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # Allow Ingress rule for Worker node groups from Cluster Sec group for Karpenter node_security_group_additional_rules = { ingress_nodes_karpenter_port = { description = "Cluster API to Nodegroup for Karpenter" @@ -140,32 +84,45 @@ module "eks_blueprints" { # Add karpenter.sh/discovery tag so that we can use this as securityGroupSelector in karpenter provisioner node_security_group_tags = { - "karpenter.sh/discovery/${local.cluster_name}" = local.cluster_name + "karpenter.sh/discovery/${local.name}" = local.name } # Self-managed Node Group # Karpenter requires one node to get up and running self_managed_node_groups = { - self_mg_4 = { + self_mg_5 = { node_group_name = local.node_group_name launch_template_os = "amazonlinux2eks" max_size = 1 - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } + + tags = local.tags } +module "eks_blueprints_kubernetes_addons" { + source = "../../modules/kubernetes-addons" + + eks_cluster_id = module.eks_blueprints.eks_cluster_id + + enable_karpenter = true + + tags = local.tags + + depends_on = [module.eks_blueprints.self_managed_node_groups] +} # Creates Launch templates for Karpenter # Launch template outputs will be used in Karpenter Provisioners yaml files. 
Check out examples/karpenter/provisioners/default_provisioner_with_launch_templates.yaml module "karpenter_launch_templates" { - source = "../../modules/launch-templates" + source = "../../modules/launch-templates" + eks_cluster_id = module.eks_blueprints.eks_cluster_id - tags = { Name = "karpenter" } launch_template_config = { linux = { - ami = data.aws_ami.amazonlinux2eks.id + ami = data.aws_ami.eks.id launch_template_prefix = "karpenter" iam_instance_profile = module.eks_blueprints.self_managed_node_group_iam_instance_profile_id[0] vpc_security_group_ids = [module.eks_blueprints.worker_node_security_group_id] @@ -173,10 +130,11 @@ module "karpenter_launch_templates" { { device_name = "/dev/xvda" volume_type = "gp3" - volume_size = "200" + volume_size = 200 } ] - }, + } + bottlerocket = { ami = data.aws_ami.bottlerocket.id launch_template_os = "bottlerocket" @@ -187,22 +145,13 @@ { device_name = "/dev/xvda" volume_type = "gp3" - volume_size = "200" + volume_size = 200 } ] - }, + } } -} - -module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" - - eks_cluster_id = module.eks_blueprints.eks_cluster_id - - # Deploys Karpenter add-on - enable_karpenter = true - depends_on = [module.eks_blueprints.self_managed_node_groups] + tags = merge(local.tags, { Name = "karpenter" }) } # Deploying default provisioner for Karpenter autoscaler @@ -210,9 +159,9 @@ data "kubectl_path_documents" "karpenter_provisioners" { pattern = "${path.module}/provisioners/default_provisioner.yaml" vars = { azs = join(",", local.azs) - iam-instance-profile-id = format("%s-%s", local.cluster_name, local.node_group_name) - eks-cluster-id = local.cluster_name - eks-vpc_name = local.vpc_name + iam-instance-profile-id = "${local.name}-${local.node_group_name}" + eks-cluster-id = local.name + eks-vpc_name = local.name } } @@ -227,3 +176,62 @@ resource "kubectl_manifest" "karpenter_provisioner" { depends_on = [module.eks_blueprints_kubernetes_addons] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} + +data "aws_ami" "eks" { + owners = ["amazon"] + most_recent = true + + filter { + name = "name" + values = ["amazon-eks-node-${module.eks_blueprints.eks_cluster_version}-*"] + } +} + +data "aws_ami" "bottlerocket" { + owners = ["amazon"] + most_recent = true + + filter { + name = "name" + values = ["bottlerocket-aws-k8s-${module.eks_blueprints.eks_cluster_version}-x86_64-*"]
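# Both AMI lookups here filter on "...-${module.eks_blueprints.eks_cluster_version}-*",
# so the worker AMIs resolved for the launch templates always track the Kubernetes
# version the control plane was created with instead of a hard-coded value.
# The "karpenter.sh/discovery/${local.name}" tag applied to the node security group
# above is what a provisioner can select on; a hypothetical snippet for
# default_provisioner.yaml (illustrative, not part of this patch) might read:
#
#   securityGroupSelector:
#     karpenter.sh/discovery/${eks-cluster-id}: ${eks-cluster-id}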
} +} diff --git a/examples/karpenter/variables.tf b/examples/karpenter/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/karpenter/variables.tf +++ b/examples/karpenter/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf index 2a74218fe2..1fcdcd4cd0 100644 --- a/examples/karpenter/versions.tf +++ b/examples/karpenter/versions.tf @@ -20,7 +20,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/karpenter/terraform.tfstate" + # } } diff --git a/examples/multi-tenancy-with-teams/main.tf b/examples/multi-tenancy-with-teams/main.tf index 5f83968069..f36ec42bb6 100644 --- a/examples/multi-tenancy-with-teams/main.tf +++ b/examples/multi-tenancy-with-teams/main.tf @@ -42,75 +42,39 @@ provider "kubectl" { } } -data "aws_availability_zones" "available" {} data "aws_caller_identity" "current" {} +data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true + name = basename(path.cwd) + region = "us-west-2" - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } -#------------------------------------------------------------------------------- -# Example to consume eks_blueprints module with Teams (Application and Platform) -#------------------------------------------------------------------------------- - +#--------------------------------------------------------------- +# EKS Blueprints +#--------------------------------------------------------------- module "eks_blueprints" { source = "../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # EKS MANAGED NODE GROUPS + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - subnet_ids = module.aws_vpc.private_subnets + instance_types = ["m5.large"] + subnet_ids = module.vpc.private_subnets } } @@ -141,7 +105,7 @@ module "eks_blueprints" { "secrets" = "10", "services" = "10" } - ## Manifests Example: + manifests_dir = "./manifests-team-red" users = [data.aws_caller_identity.current.arn] } @@ -160,9 +124,50 @@ module "eks_blueprints" { "secrets" = "20", "services" = "20" } - ## Manifests Example: + manifests_dir = "./manifests-team-blue" users = [data.aws_caller_identity.current.arn] } } + + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags } diff --git a/examples/multi-tenancy-with-teams/variables.tf b/examples/multi-tenancy-with-teams/variables.tf index e87940a03d..e69de29bb2 100644 --- a/examples/multi-tenancy-with-teams/variables.tf +++ b/examples/multi-tenancy-with-teams/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "teams" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." 
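Each entry under application_teams above is a self-contained tenant definition: broadly, the aws-eks-teams submodule creates the team namespace with the given labels, turns the quota map into a Kubernetes ResourceQuota in that namespace, applies any YAML found under manifests_dir, and grants the listed users access through the aws-auth config map. A minimal sketch of one more team following the same shape (the name and quota values are hypothetical):

application_teams = {
  team-green = { # hypothetical third team, same shape as team-red/team-blue above
    "labels" = {
      "appName" = "green-team-app",
    }
    "quota" = {
      "requests.cpu" = "1000m", # becomes the hard limits of the namespace ResourceQuota
      "pods"         = "5",
    }
    manifests_dir = "./manifests-team-green" # manifests here are applied into the team namespace
    users         = [data.aws_caller_identity.current.arn]
  }
}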
- default = "dev" -} diff --git a/examples/multi-tenancy-with-teams/versions.tf b/examples/multi-tenancy-with-teams/versions.tf index 2a74218fe2..ce13b86695 100644 --- a/examples/multi-tenancy-with-teams/versions.tf +++ b/examples/multi-tenancy-with-teams/versions.tf @@ -20,7 +20,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/multi-tenancy-with-teams/terraform.tfstate" + # } } diff --git a/examples/node-groups/fargate-profiles/main.tf b/examples/node-groups/fargate-profiles/main.tf index 829b194113..060acc33c5 100644 --- a/examples/node-groups/fargate-profiles/main.tf +++ b/examples/node-groups/fargate-profiles/main.tf @@ -17,65 +17,30 @@ provider "kubernetes" { data "aws_availability_zones" "available" {} locals { - tenant = "aws001" # AWS account name or unique id for tenant - environment = "preprod" # Environment area eg., preprod or prod - zone = "dev" # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } + #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
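# The fargate_profiles map below drives scheduling: a pod runs on Fargate only when
# its namespace (and, where labels are given, all of its labels) match one of a
# profile's selectors, so the "default" profile captures just the pods in the
# "default" namespace that carry the matching labels.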
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + cluster_name = local.name + cluster_version = "1.21" - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS CONTROL PLANE VARIABLES - cluster_version = "1.21" - #---------------------------------------------------------# - # FARGATE PROFILES - #---------------------------------------------------------# fargate_profiles = { default = { fargate_profile_name = "default" @@ -88,11 +53,52 @@ module "eks_blueprints" { } }] - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets additional_tags = { ExtraTag = "Fargate" } - }, - } # END OF FARGATE PROFILES + } + } + + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags } diff --git a/examples/node-groups/fargate-profiles/versions.tf b/examples/node-groups/fargate-profiles/versions.tf index df9b71a0f0..aa8cd774e8 100644 --- a/examples/node-groups/fargate-profiles/versions.tf +++ b/examples/node-groups/fargate-profiles/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/fargate-profiles/terraform.tfstate" + # } } diff --git a/examples/node-groups/managed-node-groups/main.tf b/examples/node-groups/managed-node-groups/main.tf index 898f25400d..fee3d12071 100644 --- a/examples/node-groups/managed-node-groups/main.tf +++ b/examples/node-groups/managed-node-groups/main.tf @@ -28,8 +28,6 @@ provider "helm" { } } -data "aws_availability_zones" "available" {} - data "aws_ami" "amazonlinux2eks" { most_recent = true @@ -41,78 +39,36 @@ data "aws_ami" "amazonlinux2eks" { owners = ["amazon"] } -#------------------------------------------------------------------------ -# Local Variables -#------------------------------------------------------------------------ -locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Evironment with in one sub_tenant or business unit - region = "us-west-2" - - 
vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - count_availability_zone = (length(data.aws_availability_zones.available.names) <= 3) ? length(data.aws_availability_zones.available.zone_ids) : 3 - azs = slice(data.aws_availability_zones.available.names, 0, local.count_availability_zone) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - cluster_version = "1.21" - - terraform_version = "Terraform v1.0.1" - policy_arn_prefix = "arn:aws:iam::aws:policy" - ec2_principal = "ec2.amazonaws.com" -} - -#------------------------------------------------------------------------ -# AWS VPC Module -#------------------------------------------------------------------------ -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs +data "aws_availability_zones" "available" {} - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] +locals { + name = basename(path.cwd) + region = "us-west-2" - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true + cluster_version = "1.21" - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } -#------------------------------------------------------------------------ -# AWS EKS Blueprints Module -#------------------------------------------------------------------------ +#--------------------------------------------------------------- +# EKS Blueprints +#--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # Attach additional security group ids to Worker Security group ID - worker_additional_security_group_ids = [] # Optional - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = local.cluster_version + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + node_security_group_additional_rules = { # Extend node-to-node security group rules. 
Recommended and required for the Add-ons ingress_self_all = { @@ -123,7 +79,7 @@ module "eks_blueprints" { type = "ingress" self = true } - #Recommended outbound traffic for Node groups + # Recommended outbound traffic for Node groups egress_all = { description = "Node all egress" protocol = "-1" @@ -146,13 +102,12 @@ module "eks_blueprints" { } } - # EKS MANAGED NODE GROUPS managed_node_groups = { # Managed Node groups with minimum config mg5 = { node_group_name = "mg5" instance_types = ["m5.large"] - min_size = "2" + min_size = 2 create_iam_role = false # Changing `create_iam_role=false` to bring your own IAM Role iam_role_arn = aws_iam_role.managed_ng.arn disk_size = 100 # Disk size is used only with Managed Node Groups without Launch Templates @@ -307,11 +262,9 @@ module "eks_blueprints" { } } + tags = local.tags } -#------------------------------------------------------------------------ -# Kubernetes Add-on Module -#------------------------------------------------------------------------ module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" @@ -320,6 +273,46 @@ module "eks_blueprints_kubernetes_addons" { enable_metrics_server = true enable_cluster_autoscaler = true + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags } #--------------------------------------------------------------- @@ -334,7 +327,7 @@ data "aws_iam_policy_document" "managed_ng_assume_role_policy" { ] principals { type = "Service" - identifiers = [local.ec2_principal] + identifiers = ["ec2.amazonaws.com"] } } } @@ -345,10 +338,14 @@ resource "aws_iam_role" "managed_ng" { assume_role_policy = data.aws_iam_policy_document.managed_ng_assume_role_policy.json path = "/" force_detach_policies = true - managed_policy_arns = ["${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy", - "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy", - "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly", - "${local.policy_arn_prefix}/AmazonSSMManagedInstanceCore"] + managed_policy_arns = [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + ] + + tags = local.tags } resource "aws_iam_instance_profile" "managed_ng" { @@ -359,4 +356,6 @@ resource "aws_iam_instance_profile" "managed_ng" { lifecycle { create_before_destroy = true } 
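# Bring-your-own-IAM pattern: with create_iam_role = false on the mg5 group above,
# this example owns the role and instance profile and passes the module only the
# iam_role_arn. create_before_destroy makes Terraform stand up a replacement
# profile before deleting the old one, so nodes are never left referencing a
# profile that no longer exists.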
+ + tags = local.tags } diff --git a/examples/node-groups/managed-node-groups/variables.tf b/examples/node-groups/managed-node-groups/variables.tf index 87d1355214..e69de29bb2 100644 --- a/examples/node-groups/managed-node-groups/variables.tf +++ b/examples/node-groups/managed-node-groups/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws" -} - -variable "environment" { - type = string - default = "managed" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} diff --git a/examples/node-groups/managed-node-groups/versions.tf b/examples/node-groups/managed-node-groups/versions.tf index df9b71a0f0..c30e922ddc 100644 --- a/examples/node-groups/managed-node-groups/versions.tf +++ b/examples/node-groups/managed-node-groups/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/managed-node-groups/terraform.tfstate" + # } } diff --git a/examples/node-groups/self-managed-node-groups/main.tf b/examples/node-groups/self-managed-node-groups/main.tf index 2282cbf02c..fb7052418f 100644 --- a/examples/node-groups/self-managed-node-groups/main.tf +++ b/examples/node-groups/self-managed-node-groups/main.tf @@ -17,76 +17,41 @@ provider "kubernetes" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - - terraform_version = "Terraform v1.0.1" - policy_arn_prefix = "arn:aws:iam::aws:policy" - ec2_principal = "ec2.amazonaws.com" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs + name = basename(path.cwd) + region = "us-west-2" - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } + #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + self_managed_node_groups = { self_mg4 = { node_group_name = "self_mg4" launch_template_os = "amazonlinux2eks" - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } self_mg5 = { node_group_name = "self_mg5" # Name is used to create a dedicated IAM role for each node group and adds to AWS-AUTH config map subnet_type = "private" - subnet_ids = module.aws_vpc.private_subnets # Optional defaults to Private Subnet Ids used by EKS Control Plane + subnet_ids = module.vpc.private_subnets # Optional defaults to Private Subnet Ids used by EKS Control Plane create_launch_template = true launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows custom_ami_id = "" # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc. @@ -155,7 +120,7 @@ module "eks_blueprints" { subnet_type = "private" } } - } # END OF SELF MANAGED NODE GROUPS + } } #--------------------------------------------------------------- @@ -170,7 +135,7 @@ data "aws_iam_policy_document" "self_managed_ng_assume_role_policy" { ] principals { type = "Service" - identifiers = [local.ec2_principal] + identifiers = ["ec2.amazonaws.com"] } } } @@ -181,10 +146,14 @@ resource "aws_iam_role" "self_managed_ng" { assume_role_policy = data.aws_iam_policy_document.self_managed_ng_assume_role_policy.json path = "/" force_detach_policies = true - managed_policy_arns = ["${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy", - "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy", - "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly", - "${local.policy_arn_prefix}/AmazonSSMManagedInstanceCore"] + managed_policy_arns = [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + ] + + tags = local.tags } resource "aws_iam_instance_profile" "self_managed_ng" { @@ -195,4 +164,45 @@ resource "aws_iam_instance_profile" "self_managed_ng" { lifecycle { create_before_destroy = true } + + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + 
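# These subnet tags drive load balancer subnet discovery: "kubernetes.io/role/elb"
# marks the public subnets as candidates for internet-facing load balancers,
# "kubernetes.io/role/internal-elb" below marks the private subnets for internal
# ones, and the "kubernetes.io/cluster/<name>" = "shared" tag scopes both to this
# cluster.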
private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags } diff --git a/examples/node-groups/self-managed-node-groups/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/node-groups/self-managed-node-groups/variables.tf +++ b/examples/node-groups/self-managed-node-groups/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} diff --git a/examples/node-groups/self-managed-node-groups/versions.tf index df9b71a0f0..daf48803c1 100644 --- a/examples/node-groups/self-managed-node-groups/versions.tf +++ b/examples/node-groups/self-managed-node-groups/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/self-managed-node-groups/terraform.tfstate" + # } } diff --git a/examples/node-groups/windows-node-groups/README.md index 6b2b0dae9a..3a6683ca67 100644 --- a/examples/node-groups/windows-node-groups/README.md +++ b/examples/node-groups/windows-node-groups/README.md @@ -59,9 +59,9 @@ EKS Cluster details can be extracted from terraform output or from AWS Console to get the name of the cluster. ### Step 5: Run `update-kubeconfig` command. -`~/.kube/config` file gets updated with EKS cluster context from the below command. Replace the region name and EKS cluster name with your cluster's name. (If you did not change the `tenant`, `environment`, and `zone` values in this example, the EKS cluster name will be `aws001-preprod-dev-eks`.) +`~/.kube/config` file gets updated with EKS cluster context from the below command. Replace the region and EKS cluster name with your own values.
- $ aws eks --region us-west-2 update-kubeconfig --name aws001-preprod-dev-eks + $ aws eks --region us-west-2 update-kubeconfig --name <cluster-name> ### Step 6: (Optional) Deploy sample Windows and Linux workloads to verify support for both operating systems diff --git a/examples/node-groups/windows-node-groups/main.tf index 21a31751c3..afbe5479fc 100644 --- a/examples/node-groups/windows-node-groups/main.tf +++ b/examples/node-groups/windows-node-groups/main.tf @@ -31,89 +31,53 @@ provider "helm" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.1.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.0.1" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } - } + #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.."
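# enable_windows_support = true below is what switches the cluster into mixed-OS
# mode; the Windows capacity itself comes from the self-managed node group with
# launch_template_os = "windows", while the Helm `set` values passed to the
# add-ons further down appear to pin Linux-only components (such as the load
# balancer controller and CoreDNS) to Linux nodes via a nodeSelector.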
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets - - # EKS CONTROL PLANE VARIABLES + cluster_name = local.name cluster_version = "1.21" - # EKS MANAGED NODE GROUP - # with Spot instances + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets + managed_node_groups = { mng_spot_medium = { node_group_name = "mng-spot-med" capacity_type = "SPOT" - instance_types = ["t3.medium", "t3a.medium"] - subnet_ids = module.aws_vpc.private_subnets - desired_size = "2" + instance_types = ["t3.large", "t3.xlarge"] + subnet_ids = module.vpc.private_subnets + desired_size = 2 disk_size = 30 } } - # SELF-MANAGED NODE GROUP with Windows support enable_windows_support = true - self_managed_node_groups = { ng_od_windows = { node_group_name = "ng-od-windows" launch_template_os = "windows" - instance_type = "m5n.large" - subnet_ids = module.aws_vpc.private_subnets - min_size = "2" + instance_type = "m5.large" + subnet_ids = module.vpc.private_subnets + min_size = 2 } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { @@ -125,8 +89,7 @@ module "eks_blueprints_kubernetes_addons" { enable_amazon_eks_coredns = true enable_amazon_eks_kube_proxy = true - # K8s Add-ons - # Ensure proper node assignment + # Add-ons enable_aws_load_balancer_controller = true aws_load_balancer_controller_helm_config = { set = [ @@ -157,5 +120,46 @@ module "eks_blueprints_kubernetes_addons" { ] } + tags = local.tags + depends_on = [module.eks_blueprints.managed_node_groups] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/node-groups/windows-node-groups/variables.tf b/examples/node-groups/windows-node-groups/variables.tf index adb3fd6e26..e69de29bb2 100644 --- a/examples/node-groups/windows-node-groups/variables.tf +++ b/examples/node-groups/windows-node-groups/variables.tf @@ -1,17 +0,0 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "aws001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." 
- default = "dev" -} diff --git a/examples/node-groups/windows-node-groups/versions.tf b/examples/node-groups/windows-node-groups/versions.tf index df9b71a0f0..4f3abb9816 100644 --- a/examples/node-groups/windows-node-groups/versions.tf +++ b/examples/node-groups/windows-node-groups/versions.tf @@ -16,7 +16,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/windows-node-groups/terraform.tfstate" + # } } diff --git a/examples/observability/adot-amp-grafana-for-haproxy/main.tf b/examples/observability/adot-amp-grafana-for-haproxy/main.tf index f43350ee95..01d046672d 100644 --- a/examples/observability/adot-amp-grafana-for-haproxy/main.tf +++ b/examples/observability/adot-amp-grafana-for-haproxy/main.tf @@ -36,98 +36,64 @@ provider "grafana" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment within one sub_tenant or business unit - - region = "us-east-1" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - cluster_version = "1.21" - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + name = basename(path.cwd) + region = "us-west-2" vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - - terraform_version = "Terraform v1.1.7" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + azs = slice(data.aws_availability_zones.available.names, 0, 3) - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Provision EKS and Helm Charts +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + cluster_name = local.name + cluster_version = "1.21" - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS Control Plane Variables - cluster_version = local.cluster_version + enable_amazon_prometheus = true managed_node_groups = { t3_l = { node_group_name = "managed-ondemand" instance_types = ["t3.large"] min_size = 2 - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } - # Provisions a new Amazon Managed Service for Prometheus workspace - enable_amazon_prometheus = true + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" + source = "../../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id - # OTEL JMX use cases enable_cert_manager = true enable_opentelemetry_operator = true enable_adot_collector_haproxy = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint amazon_prometheus_workspace_region = local.region + + tags = local.tags } -# Configure HAProxy default Grafana dashboards resource "grafana_data_source" "prometheus" { type = "prometheus" name = "amp" is_default = true url = module.eks_blueprints.amazon_prometheus_workspace_endpoint + json_data { http_method = "GET" sigv4_auth = true @@ -219,3 +185,42 @@ resource "aws_prometheus_alert_manager_definition" "haproxy" { - name: 'default' EOF } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/observability/adot-amp-grafana-for-haproxy/variables.tf b/examples/observability/adot-amp-grafana-for-haproxy/variables.tf index cf0e717c97..df14cbb1bf 100644 --- a/examples/observability/adot-amp-grafana-for-haproxy/variables.tf +++ b/examples/observability/adot-amp-grafana-for-haproxy/variables.tf @@ -1,21 +1,3 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "adot001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. 
dev or qa or load or ops etc..." - default = "dev" -} - variable "grafana_endpoint" { description = "Grafana endpoint" type = string diff --git a/examples/observability/adot-amp-grafana-for-haproxy/versions.tf b/examples/observability/adot-amp-grafana-for-haproxy/versions.tf index a6eb4835e3..9a5880ac50 100644 --- a/examples/observability/adot-amp-grafana-for-haproxy/versions.tf +++ b/examples/observability/adot-amp-grafana-for-haproxy/versions.tf @@ -24,7 +24,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/adot-amp-grafana-for-haproxy/terraform.tfstate" + # } } diff --git a/examples/observability/adot-amp-grafana-for-java/main.tf b/examples/observability/adot-amp-grafana-for-java/main.tf index 86f2f94119..04580b8e85 100644 --- a/examples/observability/adot-amp-grafana-for-java/main.tf +++ b/examples/observability/adot-amp-grafana-for-java/main.tf @@ -36,98 +36,64 @@ provider "grafana" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment within one sub_tenant or business unit - - region = "us-east-1" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - cluster_version = "1.21" - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + name = basename(path.cwd) + region = "us-west-2" vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - - terraform_version = "Terraform v1.1.7" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + azs = slice(data.aws_availability_zones.available.names, 0, 3) - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Provision EKS and Helm Charts +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
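# enable_amazon_prometheus = true below provisions an Amazon Managed Service for
# Prometheus workspace alongside the cluster; the workspace endpoint and region are
# then handed to the kubernetes-addons module as the remote-write target for the
# ADOT collector, and the same endpoint backs the sigv4-authenticated Grafana data
# source that follows.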
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + cluster_name = local.name + cluster_version = "1.21" - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS Control Plane Variables - cluster_version = local.cluster_version + enable_amazon_prometheus = true managed_node_groups = { t3_l = { node_group_name = "managed-ondemand" instance_types = ["t3.large"] min_size = 2 - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } - # Provisions a new Amazon Managed Service for Prometheus workspace - enable_amazon_prometheus = true + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" + source = "../../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id - # OTEL JMX use cases enable_cert_manager = true enable_opentelemetry_operator = true enable_adot_collector_java = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint amazon_prometheus_workspace_region = local.region + + tags = local.tags } -# Configure JMX default Grafana dashboards resource "grafana_data_source" "prometheus" { type = "prometheus" name = "amp" is_default = true url = module.eks_blueprints.amazon_prometheus_workspace_endpoint + json_data { http_method = "GET" sigv4_auth = true @@ -146,7 +112,7 @@ resource "grafana_dashboard" "jmx_dashboards" { } resource "aws_prometheus_rule_group_namespace" "java_jmx" { - name = "java_jmx_rules" + name = local.name workspace_id = module.eks_blueprints.amazon_prometheus_workspace_id data = <<-EOF @@ -179,3 +145,42 @@ resource "aws_prometheus_alert_manager_definition" "java_jmx" { - name: 'default' EOF } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/observability/adot-amp-grafana-for-java/variables.tf b/examples/observability/adot-amp-grafana-for-java/variables.tf index cf0e717c97..df14cbb1bf 100644 --- a/examples/observability/adot-amp-grafana-for-java/variables.tf +++ b/examples/observability/adot-amp-grafana-for-java/variables.tf @@ -1,21 +1,3 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - 
default = "adot001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} - variable "grafana_endpoint" { description = "Grafana endpoint" type = string diff --git a/examples/observability/adot-amp-grafana-for-java/versions.tf b/examples/observability/adot-amp-grafana-for-java/versions.tf index a6eb4835e3..35dd8a0b2c 100644 --- a/examples/observability/adot-amp-grafana-for-java/versions.tf +++ b/examples/observability/adot-amp-grafana-for-java/versions.tf @@ -24,7 +24,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/adot-amp-grafana-for-java/terraform.tfstate" + # } } diff --git a/examples/observability/adot-amp-grafana-for-memcached/main.tf b/examples/observability/adot-amp-grafana-for-memcached/main.tf index 8b4edb37ee..2c12b533b5 100644 --- a/examples/observability/adot-amp-grafana-for-memcached/main.tf +++ b/examples/observability/adot-amp-grafana-for-memcached/main.tf @@ -36,82 +36,47 @@ provider "grafana" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment within one sub_tenant or business unit - - region = "us-east-1" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - cluster_version = "1.21" - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + name = basename(path.cwd) + region = "us-west-2" vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - - terraform_version = "Terraform v1.1.7" -} - -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + azs = slice(data.aws_availability_zones.available.names, 0, 3) - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Provision EKS and Helm Charts +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + cluster_name = local.name + cluster_version = "1.21" - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS Control Plane Variables - cluster_version = local.cluster_version + enable_amazon_prometheus = true managed_node_groups = { t3_l = { node_group_name = "managed-ondemand" instance_types = ["t3.large"] min_size = 2 - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } - # Provisions a new Amazon Managed Service for Prometheus workspace - enable_amazon_prometheus = true + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" + source = "../../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id # OTEL JMX use cases @@ -120,14 +85,16 @@ module "eks_blueprints_kubernetes_addons" { enable_adot_collector_memcached = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint amazon_prometheus_workspace_region = local.region + + tags = local.tags } -# Configure HAProxy default Grafana dashboards resource "grafana_data_source" "prometheus" { type = "prometheus" name = "amp" is_default = true url = module.eks_blueprints.amazon_prometheus_workspace_endpoint + json_data { http_method = "GET" sigv4_auth = true @@ -175,3 +142,42 @@ resource "aws_prometheus_alert_manager_definition" "memcached" { - name: 'default' EOF } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/observability/adot-amp-grafana-for-memcached/variables.tf b/examples/observability/adot-amp-grafana-for-memcached/variables.tf index cf0e717c97..df14cbb1bf 100644 --- a/examples/observability/adot-amp-grafana-for-memcached/variables.tf +++ b/examples/observability/adot-amp-grafana-for-memcached/variables.tf @@ -1,21 +1,3 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "adot001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. 
dev or qa or load or ops etc..." - default = "dev" -} - variable "grafana_endpoint" { description = "Grafana endpoint" type = string diff --git a/examples/observability/adot-amp-grafana-for-memcached/versions.tf b/examples/observability/adot-amp-grafana-for-memcached/versions.tf index a6eb4835e3..17f4719751 100644 --- a/examples/observability/adot-amp-grafana-for-memcached/versions.tf +++ b/examples/observability/adot-amp-grafana-for-memcached/versions.tf @@ -24,7 +24,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/adot-amp-grafana-for-memcached/terraform.tfstate" + # } } diff --git a/examples/observability/adot-amp-grafana-for-nginx/main.tf b/examples/observability/adot-amp-grafana-for-nginx/main.tf index c7e4a1f0dc..85628aebba 100644 --- a/examples/observability/adot-amp-grafana-for-nginx/main.tf +++ b/examples/observability/adot-amp-grafana-for-nginx/main.tf @@ -36,101 +36,64 @@ provider "grafana" { data "aws_availability_zones" "available" {} locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment within one sub_tenant or business unit - + name = basename(path.cwd) region = "us-west-2" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - cluster_version = "1.21" - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) vpc_cidr = "10.0.0.0/16" - vpc_name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - - terraform_version = "Terraform v1.1.7" -} - -#--------------------------------------------------------------- -# Networking -#--------------------------------------------------------------- -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = local.vpc_name - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } + azs = slice(data.aws_availability_zones.available.names, 0, 3) - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Provision EKS and Helm Charts +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
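# Illustrative sketch: with the aws-resource-tags module removed, the root
# module applies the `tags` map verbatim instead of deriving tags from
# tenant/environment/zone, so callers can layer extra keys with merge().
# The Environment value below is hypothetical.
module "eks_blueprints" {
  source = "../../.."

  cluster_name       = local.name
  cluster_version    = "1.21"
  vpc_id             = module.vpc.vpc_id
  private_subnet_ids = module.vpc.private_subnets

  tags = merge(local.tags, { Environment = "preprod" }) # hypothetical extra key
}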
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + cluster_name = local.name + cluster_version = "1.21" - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS Control Plane Variables - cluster_version = local.cluster_version + enable_amazon_prometheus = true managed_node_groups = { t3_l = { node_group_name = "managed-ondemand" instance_types = ["t3.large"] min_size = 2 - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } - # Provisions a new Amazon Managed Service for Prometheus workspace - enable_amazon_prometheus = true + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" + source = "../../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id - # OTEL Nginx use cases enable_cert_manager = true enable_opentelemetry_operator = true enable_adot_collector_nginx = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint amazon_prometheus_workspace_region = local.region + + tags = local.tags } -# Configure Nginx default Grafana dashboards resource "grafana_data_source" "prometheus" { type = "prometheus" name = "amp" is_default = true url = module.eks_blueprints.amazon_prometheus_workspace_endpoint + json_data { http_method = "GET" sigv4_auth = true @@ -149,10 +112,10 @@ resource "grafana_dashboard" "nginx_dashboards" { } resource "aws_prometheus_rule_group_namespace" "nginx" { - name = "obsa-nginx_rules" - + name = local.name workspace_id = module.eks_blueprints.amazon_prometheus_workspace_id - data = <<-EOF + + data = <<-EOF groups: - name: Nginx-HTTP-4xx-error-rate rules: @@ -189,7 +152,8 @@ resource "aws_prometheus_rule_group_namespace" "nginx" { resource "aws_prometheus_alert_manager_definition" "nginx" { workspace_id = module.eks_blueprints.amazon_prometheus_workspace_id - definition = <<-EOF + + definition = <<-EOF alertmanager_config: | route: receiver: 'default' @@ -197,3 +161,42 @@ resource "aws_prometheus_alert_manager_definition" "nginx" { - name: 'default' EOF } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/observability/adot-amp-grafana-for-nginx/variables.tf 
b/examples/observability/adot-amp-grafana-for-nginx/variables.tf index cf0e717c97..df14cbb1bf 100644 --- a/examples/observability/adot-amp-grafana-for-nginx/variables.tf +++ b/examples/observability/adot-amp-grafana-for-nginx/variables.tf @@ -1,21 +1,3 @@ -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" - default = "adot001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} - variable "grafana_endpoint" { description = "Grafana endpoint" type = string diff --git a/examples/observability/adot-amp-grafana-for-nginx/versions.tf b/examples/observability/adot-amp-grafana-for-nginx/versions.tf index 6b31c7afba..db04a14454 100644 --- a/examples/observability/adot-amp-grafana-for-nginx/versions.tf +++ b/examples/observability/adot-amp-grafana-for-nginx/versions.tf @@ -20,7 +20,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/adot-amp-grafana-for-nginx/terraform.tfstate" + # } } diff --git a/examples/observability/amp-amg-opensearch/main.tf b/examples/observability/amp-amg-opensearch/main.tf index 752977b82a..2b50cb8664 100644 --- a/examples/observability/amp-amg-opensearch/main.tf +++ b/examples/observability/amp-amg-opensearch/main.tf @@ -36,83 +36,50 @@ provider "grafana" { data "aws_availability_zones" "available" {} locals { - tenant = "aws001" # AWS account name or unique id for tenant - environment = "preprod" # Environment area eg., preprod or prod - zone = "observability" # Environment within one sub_tenant or business unit - region = "us-west-2" + name = basename(path.cwd) + region = "us-west-2" - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - terraform_version = "Terraform v1.1.4" -} - -#--------------------------------------------------------------- -# Networking -#--------------------------------------------------------------- -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] - - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Provision EKS and Helm Charts +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../../.." 
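# Usage note for the commented backend blocks above: with the backend
# removed from versions.tf, `terraform init` defaults to local state. To
# keep state remotely, uncomment the block and point it at a bucket you
# own; bucket and key below are placeholders. Re-running `terraform init`
# after the change offers to migrate any existing local state.
terraform {
  backend "s3" {
    bucket = "my-terraform-state"                 # placeholder bucket
    region = "us-west-2"
    key    = "eks-blueprints/example.tfstate"     # placeholder key
  }
}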
- tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version + cluster_name = local.name + cluster_version = "1.21" - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS Control Plane Variables - cluster_version = "1.21" + enable_amazon_prometheus = true managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" instance_types = ["m5.xlarge"] min_size = 3 - subnet_ids = module.aws_vpc.private_subnets + subnet_ids = module.vpc.private_subnets } } - # Provisions a new Amazon Managed Service for Prometheus workspace - enable_amazon_prometheus = true + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../../modules/kubernetes-addons" + source = "../../../modules/kubernetes-addons" + eks_cluster_id = module.eks_blueprints.eks_cluster_id - #K8s Add-ons + # Add-ons enable_metrics_server = true enable_cluster_autoscaler = true enable_argocd = true @@ -124,7 +91,6 @@ module "eks_blueprints_kubernetes_addons" { } } - # Fluentbit enable_aws_for_fluentbit = true aws_for_fluentbit_irsa_policies = [aws_iam_policy.fluentbit_opensearch_access.arn] aws_for_fluentbit_helm_config = { @@ -134,14 +100,15 @@ module "eks_blueprints_kubernetes_addons" { })] } - # Prometheus and Amazon Managed Prometheus integration enable_prometheus = true enable_amazon_prometheus = true amazon_prometheus_workspace_endpoint = module.eks_blueprints.amazon_prometheus_workspace_endpoint + tags = local.tags + depends_on = [ module.eks_blueprints.managed_node_groups, - module.aws_vpc + module.vpc ] } @@ -171,7 +138,7 @@ resource "aws_elasticsearch_domain" "opensearch" { elasticsearch_version = "OpenSearch_1.1" cluster_config { - instance_type = "m4.large.elasticsearch" + instance_type = "m6g.large.elasticsearch" instance_count = 3 zone_awareness_enabled = true @@ -209,13 +176,15 @@ resource "aws_elasticsearch_domain" "opensearch" { } vpc_options { - subnet_ids = module.aws_vpc.public_subnets + subnet_ids = module.vpc.public_subnets security_group_ids = [aws_security_group.opensearch_access.id] } depends_on = [ aws_iam_service_linked_role.opensearch ] + + tags = local.tags } resource "aws_iam_service_linked_role" "opensearch" { @@ -235,7 +204,7 @@ resource "aws_elasticsearch_domain_policy" "opensearch_access_policy" { } resource "aws_security_group" "opensearch_access" { - vpc_id = module.aws_vpc.vpc_id + vpc_id = module.vpc.vpc_id description = "OpenSearch access" ingress { @@ -252,7 +221,7 @@ resource "aws_security_group" "opensearch_access" { to_port = 443 protocol = "tcp" - cidr_blocks = [local.vpc_cidr] + cidr_blocks = [module.vpc.vpc_cidr_block] } egress { @@ -262,4 +231,45 @@ resource "aws_security_group" "opensearch_access" { protocol = "-1" cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-egress-sgr } + + tags = local.tags +} + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = 
true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags } diff --git a/examples/observability/amp-amg-opensearch/versions.tf b/examples/observability/amp-amg-opensearch/versions.tf index 6b31c7afba..0130b81620 100644 --- a/examples/observability/amp-amg-opensearch/versions.tf +++ b/examples/observability/amp-amg-opensearch/versions.tf @@ -20,7 +20,10 @@ terraform { } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/amp-amg-opensearch/terraform.tfstate" + # } } diff --git a/examples/tls-with-aws-pca-issuer/main.tf b/examples/tls-with-aws-pca-issuer/main.tf index c6fa1c6a15..eecdc5119a 100644 --- a/examples/tls-with-aws-pca-issuer/main.tf +++ b/examples/tls-with-aws-pca-issuer/main.tf @@ -16,93 +16,76 @@ provider "kubectl" { } } -data "aws_availability_zones" "available" {} -data "aws_partition" "current" {} - -locals { - tenant = var.tenant # AWS account name or unique id for tenant - environment = var.environment # Environment area eg., preprod or prod - zone = var.zone # Environment with in one sub_tenant or business unit - - cluster_version = "1.21" - region = "us-west-2" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - cluster_name = join("-", [local.tenant, local.environment, local.zone, "eks"]) - - terraform_version = "Terraform v1.0.1" +provider "helm" { + kubernetes { + host = module.eks_blueprints.eks_cluster_endpoint + cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] + } + } } -module "aws_vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 3.0" - - name = join("-", [local.tenant, local.environment, local.zone, "vpc"]) - cidr = local.vpc_cidr - azs = local.azs - - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] +data "aws_partition" "current" {} +data "aws_availability_zones" "available" {} - enable_nat_gateway = true - create_igw = true - enable_dns_hostnames = true - single_nat_gateway = true +locals { + name = basename(path.cwd) + region = "us-west-2" - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" + tags = { + Blueprint = local.name + GithubRepo = 
"github.com/aws-ia/terraform-aws-eks-blueprints" } } #--------------------------------------------------------------- -# Example to consume eks_blueprints module +# EKS Blueprints #--------------------------------------------------------------- module "eks_blueprints" { source = "../.." - tenant = local.tenant - environment = local.environment - zone = local.zone - terraform_version = local.terraform_version - - # EKS Cluster VPC and Subnet mandatory config - vpc_id = module.aws_vpc.vpc_id - private_subnet_ids = module.aws_vpc.private_subnets + cluster_name = local.name + cluster_version = "1.21" - # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version + vpc_id = module.vpc.vpc_id + private_subnet_ids = module.vpc.private_subnets - # EKS MANAGED NODE GROUPS managed_node_groups = { - mg_4 = { + mg_5 = { node_group_name = "managed-ondemand" - instance_types = ["m4.large"] - min_size = "2" - subnet_ids = module.aws_vpc.private_subnets + instance_types = ["m5.large"] + min_size = 2 + subnet_ids = module.vpc.private_subnets } } + + tags = local.tags } module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - aws_privateca_acmca_arn = aws_acmpca_certificate_authority.example.arn + source = "../../modules/kubernetes-addons" + + eks_cluster_id = module.eks_blueprints.eks_cluster_id # EKS Managed Add-ons enable_amazon_eks_vpc_cni = true enable_amazon_eks_coredns = true enable_amazon_eks_kube_proxy = true - #K8s Add-ons + # Add-ons enable_cert_manager = true enable_aws_privateca_issuer = true + aws_privateca_acmca_arn = aws_acmpca_certificate_authority.example.arn + + tags = local.tags depends_on = [module.eks_blueprints.managed_node_groups] } @@ -124,6 +107,8 @@ resource "aws_acmpca_certificate_authority" "example" { common_name = "example.com" } } + + tags = local.tags } resource "aws_acmpca_certificate" "example" { @@ -209,3 +194,42 @@ resource "kubectl_manifest" "example_pca_certificate" { kubectl_manifest.cluster_pca_issuer, ] } + +#--------------------------------------------------------------- +# Supporting Resources +#--------------------------------------------------------------- +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + # Manage so we can name + manage_default_network_acl = true + default_network_acl_tags = { Name = "${local.name}-default" } + manage_default_route_table = true + default_route_table_tags = { Name = "${local.name}-default" } + manage_default_security_group = true + default_security_group_tags = { Name = "${local.name}-default" } + + public_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = local.tags +} diff --git a/examples/tls-with-aws-pca-issuer/variables.tf b/examples/tls-with-aws-pca-issuer/variables.tf index 74fe16c2d3..29e82c3cc8 100644 --- a/examples/tls-with-aws-pca-issuer/variables.tf +++ b/examples/tls-with-aws-pca-issuer/variables.tf @@ -1,21 +1,3 @@ -variable "tenant" { - type = string - description = "Account Name or unique 
account unique id e.g., apps or management or aws007" - default = "pca001" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} - variable "certificate_name" { type = string description = "name for the certificate" diff --git a/examples/tls-with-aws-pca-issuer/versions.tf b/examples/tls-with-aws-pca-issuer/versions.tf index a5a71eb021..64c8c7898e 100644 --- a/examples/tls-with-aws-pca-issuer/versions.tf +++ b/examples/tls-with-aws-pca-issuer/versions.tf @@ -10,9 +10,16 @@ terraform { source = "gavinbunney/kubectl" version = ">= 1.14" } + helm = { + source = "hashicorp/helm" + version = ">= 2.4.1" + } } - backend "local" { - path = "local_tf_state/terraform-main.tfstate" - } + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/tls-with-aws-pca-issuer/terraform.tfstate" + # } } diff --git a/locals.tf b/locals.tf index fb9f95be29..b6467ca9bc 100644 --- a/locals.tf +++ b/locals.tf @@ -21,7 +21,6 @@ locals { vpc_id = var.vpc_id private_subnet_ids = var.private_subnet_ids public_subnet_ids = var.public_subnet_ids - tags = module.eks_tags.tags enable_workers = length(var.self_managed_node_groups) > 0 || length(var.managed_node_groups) > 0 ? true : false worker_security_group_ids = local.enable_workers ? compact(flatten([[module.aws_eks.node_security_group_id], var.worker_additional_security_group_ids])) : [] @@ -56,7 +55,7 @@ locals { service_ipv6_cidr = var.cluster_service_ipv6_cidr service_ipv4_cidr = var.cluster_service_ipv4_cidr - tags = local.tags + tags = var.tags } fargate_context = { @@ -64,7 +63,7 @@ locals { aws_partition_id = local.context.aws_partition_id iam_role_path = var.iam_role_path iam_role_permissions_boundary = var.iam_role_permissions_boundary - tags = local.tags + tags = var.tags } # Managed node IAM Roles for aws-auth @@ -127,13 +126,13 @@ locals { ] : [] # Teams - role_prefix_name = format("%s-%s-%s", var.tenant, var.environment, var.zone) - partition = local.context.aws_partition_id - account_id = local.context.aws_caller_identity_account_id + partition = local.context.aws_partition_id + account_id = local.context.aws_caller_identity_account_id + # TODO - move this into `aws-eks-teams` to avoid getting out of sync platform_teams_config_map = length(var.platform_teams) > 0 ? [ for platform_team_name, platform_team_data in var.platform_teams : { - rolearn : "arn:${local.partition}:iam::${local.account_id}:role/${format("%s-%s-%s", local.role_prefix_name, "${platform_team_name}", "Access")}" + rolearn : "arn:${local.partition}:iam::${local.account_id}:role/${module.aws_eks.cluster_id}-${platform_team_name}-access" username : "${platform_team_name}" groups : [ "system:masters" @@ -141,9 +140,10 @@ locals { } ] : [] + # TODO - move this into `aws-eks-teams` to avoid getting out of sync application_teams_config_map = length(var.application_teams) > 0 ? 
[ for team_name, team_data in var.application_teams : { - rolearn : "arn:${local.partition}:iam::${local.account_id}:role/${format("%s-%s-%s", local.role_prefix_name, "${team_name}", "Access")}" + rolearn : "arn:${local.partition}:iam::${local.account_id}:role/${module.aws_eks.cluster_id}-${team_name}-access" username : "${team_name}" groups : [ "${team_name}-group" @@ -151,5 +151,6 @@ locals { } ] : [] - cluster_iam_role_name = var.iam_role_name == null ? "${module.eks_tags.tags.name}-cluster-role" : var.iam_role_name + cluster_iam_role_name = var.iam_role_name == null ? "${var.cluster_name}-cluster-role" : var.iam_role_name + cluster_iam_role_arn = var.create_iam_role ? "arn:${local.context.aws_partition_id}:iam::${local.context.aws_caller_identity_account_id}:role/${local.cluster_iam_role_name}" : var.iam_role_arn } diff --git a/main.tf b/main.tf index 3f3bcdeab3..03cf52b372 100644 --- a/main.tf +++ b/main.tf @@ -1,16 +1,3 @@ -# --------------------------------------------------------------------------------------------------------------------- -# LABELING EKS RESOURCES -# --------------------------------------------------------------------------------------------------------------------- -module "eks_tags" { - source = "./modules/aws-resource-tags" - org = var.org - tenant = var.tenant - environment = var.environment - zone = var.zone - resource = "eks" - tags = merge(var.tags, { "created-by" = var.terraform_version }) -} - # --------------------------------------------------------------------------------------------------------------------- # CLUSTER KMS KEY # --------------------------------------------------------------------------------------------------------------------- @@ -18,11 +5,11 @@ module "kms" { count = var.create_eks && var.cluster_kms_key_arn == null ? 1 : 0 source = "./modules/aws-kms" - alias = "alias/${module.eks_tags.id}" - description = "${module.eks_tags.id} EKS cluster secret encryption key" + alias = "alias/${var.cluster_name}" + description = "${var.cluster_name} EKS cluster secret encryption key" policy = data.aws_iam_policy_document.eks_key.json deletion_window_in_days = var.cluster_kms_key_deletion_window_in_days - tags = module.eks_tags.tags + tags = var.tags } # --------------------------------------------------------------------------------------------------------------------- @@ -33,7 +20,7 @@ module "aws_eks" { version = "v18.17.0" create = var.create_eks - cluster_name = var.cluster_name == "" ? 
module.eks_tags.id : var.cluster_name + cluster_name = var.cluster_name cluster_version = var.cluster_version cluster_timeouts = var.cluster_timeouts @@ -75,7 +62,7 @@ module "aws_eks" { custom_oidc_thumbprints = var.custom_oidc_thumbprints # TAGS - tags = module.eks_tags.tags + tags = var.tags # CLUSTER LOGGING create_cloudwatch_log_group = var.create_cloudwatch_log_group @@ -149,10 +136,7 @@ module "aws_eks_teams" { application_teams = var.application_teams platform_teams = var.platform_teams - environment = var.environment - tenant = var.tenant - zone = var.zone iam_role_permissions_boundary = var.iam_role_permissions_boundary eks_cluster_id = module.aws_eks.cluster_id - tags = module.eks_tags.tags + tags = var.tags } diff --git a/modules/aws-eks-managed-node-groups/locals.tf b/modules/aws-eks-managed-node-groups/locals.tf index f8a84bbc0e..e40410a041 100644 --- a/modules/aws-eks-managed-node-groups/locals.tf +++ b/modules/aws-eks-managed-node-groups/locals.tf @@ -13,9 +13,9 @@ locals { iam_role_arn = null # iam_role_arn will be used if create_iam_role=false # Scaling Config - desired_size = "3" - max_size = "3" - min_size = "1" + desired_size = 3 + max_size = 3 + min_size = 1 disk_size = 50 # disk_size will be ignored when using Launch Templates # Upgrade Config @@ -58,7 +58,7 @@ locals { block_device_mappings = [{ device_name = "/dev/xvda" volume_type = "gp3" # The volume type. Can be standard, gp2, gp3, io1, io2, sc1 or st1 (Default: gp3). - volume_size = "100" + volume_size = 100 delete_on_termination = true encrypted = true kms_key_id = "" diff --git a/modules/aws-eks-self-managed-node-groups/locals.tf b/modules/aws-eks-self-managed-node-groups/locals.tf index 13731757bc..b815316d7b 100644 --- a/modules/aws-eks-self-managed-node-groups/locals.tf +++ b/modules/aws-eks-self-managed-node-groups/locals.tf @@ -31,8 +31,8 @@ locals { ] # AUTOSCALING - max_size = "3" - min_size = "1" + max_size = 3 + min_size = 1 subnet_type = "private" subnet_ids = [] additional_tags = {} diff --git a/modules/aws-eks-teams/README.md b/modules/aws-eks-teams/README.md index a49f2143f1..b32d446e09 100644 --- a/modules/aws-eks-teams/README.md +++ b/modules/aws-eks-teams/README.md @@ -164,12 +164,9 @@ No modules. |------|-------------|------|---------|:--------:| | [application\_teams](#input\_application\_teams) | Map of maps of teams to create | `any` | `{}` | no | | [eks\_cluster\_id](#input\_eks\_cluster\_id) | EKS Cluster name | `string` | n/a | yes | -| [environment](#input\_environment) | Environment area, e.g. prod or preprod | `string` | n/a | yes | | [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no | | [platform\_teams](#input\_platform\_teams) | Map of maps of teams to create | `any` | `{}` | no | | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | -| [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | n/a | yes | -| [zone](#input\_zone) | zone, e.g. dev or qa or load or ops etc... 
| `string` | n/a | yes | ## Outputs diff --git a/modules/aws-eks-teams/locals.tf b/modules/aws-eks-teams/locals.tf index f2a8f9250a..6b51b3d7a9 100644 --- a/modules/aws-eks-teams/locals.tf +++ b/modules/aws-eks-teams/locals.tf @@ -3,7 +3,6 @@ locals { account_id = data.aws_caller_identity.current.account_id eks_oidc_issuer_url = replace(data.aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer, "https://", "") eks_oidc_provider_arn = "arn:${local.partition}:iam::${local.account_id}:oidc-provider/${local.eks_oidc_issuer_url}" - role_prefix_name = format("%s-%s-%s", var.tenant, var.environment, var.zone) team_manifests = flatten([ for team_name, team_data in var.application_teams : diff --git a/modules/aws-eks-teams/main.tf b/modules/aws-eks-teams/main.tf index ea92a4d596..7f57195bcc 100644 --- a/modules/aws-eks-teams/main.tf +++ b/modules/aws-eks-teams/main.tf @@ -47,9 +47,11 @@ resource "kubernetes_resource_quota" "team_object_quota" { # IAM / RBAC # --------------------------------------------------------------------------------------------------------------------- resource "aws_iam_role" "team_access" { + for_each = { for team_name, team_data in var.application_teams : team_name => team_data if lookup(team_data, "users", "") != "" } + + name = "${var.eks_cluster_id}-${each.key}-access" permissions_boundary = var.iam_role_permissions_boundary - for_each = { for team_name, team_data in var.application_teams : team_name => team_data if lookup(team_data, "users", "") != "" } - name = format("%s-%s-%s", local.role_prefix_name, "${each.key}", "Access") + assume_role_policy = jsonencode({ "Version" : "2012-10-17", "Statement" : [ @@ -62,6 +64,7 @@ resource "aws_iam_role" "team_access" { } ] }) + tags = var.tags } @@ -133,10 +136,11 @@ resource "kubernetes_role_binding" "team" { } resource "aws_iam_role" "team_sa_irsa" { + for_each = var.application_teams + + name = "${var.eks_cluster_id}-${each.key}-sa-role" permissions_boundary = var.iam_role_permissions_boundary - for_each = var.application_teams - name = format("%s-%s-%s", local.role_prefix_name, "${each.key}", "sa-role") - tags = var.tags + assume_role_policy = jsonencode({ "Version" : "2012-10-17", "Statement" : [ @@ -155,6 +159,8 @@ resource "aws_iam_role" "team_sa_irsa" { } ] }) + + tags = var.tags } # --------------------------------------------------------------------------------------------------------------------- @@ -188,11 +194,12 @@ resource "kubectl_manifest" "team" { # --------------------------------------------------------------------------------------------------------------------- resource "aws_iam_role" "platform_team" { + for_each = var.platform_teams + + name = "${var.eks_cluster_id}-${each.key}-access" permissions_boundary = var.iam_role_permissions_boundary - for_each = var.platform_teams - name = format("%s-%s-%s", local.role_prefix_name, "${each.key}", "Access") - tags = var.tags managed_policy_arns = [aws_iam_policy.platform_team_eks_access[0].arn] + assume_role_policy = jsonencode({ "Version" : "2012-10-17", "Statement" : [ @@ -205,6 +212,8 @@ resource "aws_iam_role" "platform_team" { } ] }) + + tags = var.tags } # --------------------------------------------------------------------------------------------------------------------- @@ -213,7 +222,7 @@ resource "aws_iam_role" "platform_team" { resource "aws_iam_policy" "platform_team_eks_access" { count = length(var.platform_teams) > 0 ? 
1 : 0 - name = format("%s-%s", local.role_prefix_name, "PlatformTeamEKSAccess") + name = "${var.eks_cluster_id}-PlatformTeamEKSAccess" path = "/" description = "Platform Team EKS Console Access" policy = data.aws_iam_policy_document.platform_team_eks_access[0].json diff --git a/modules/aws-eks-teams/variables.tf b/modules/aws-eks-teams/variables.tf index 6c0b2cc5aa..ca279637f7 100644 --- a/modules/aws-eks-teams/variables.tf +++ b/modules/aws-eks-teams/variables.tf @@ -10,21 +10,6 @@ variable "platform_teams" { default = {} } -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" -} - -variable "environment" { - type = string - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." -} - variable "tags" { description = "A map of tags to add to all resources" type = map(string) diff --git a/modules/aws-resource-tags/README.md b/modules/aws-resource-tags/README.md deleted file mode 100644 index 722008cb54..0000000000 --- a/modules/aws-resource-tags/README.md +++ /dev/null @@ -1,37 +0,0 @@ - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | - -## Providers - -No providers. - -## Modules - -No modules. - -## Resources - -No resources. - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [environment](#input\_environment) | zone, e.g. 'prod', 'preprod' | `string` | n/a | yes | -| [org](#input\_org) | tenant, which could be your organization name, e.g. aws' | `string` | `""` | no | -| [resource](#input\_resource) | Solution name, e.g. 'app' or 'cluster' | `string` | `""` | no | -| [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit`,`XYZ`) | `map(string)` | `{}` | no | -| [tenant](#input\_tenant) | Account Name or unique account unique id e.g., apps or management or aws007 | `string` | n/a | yes | -| [zone](#input\_zone) | Environment, e.g. 
'load', 'zone', 'dev', 'uat' | `string` | n/a | yes | - -## Outputs - -| Name | Description | -|------|-------------| -| [id](#output\_id) | aws resource id | -| [tags](#output\_tags) | aws resource tags | - diff --git a/modules/aws-resource-tags/main.tf b/modules/aws-resource-tags/main.tf deleted file mode 100644 index 107212b231..0000000000 --- a/modules/aws-resource-tags/main.tf +++ /dev/null @@ -1,21 +0,0 @@ -locals { - org = var.org - tenant = var.tenant - environment = var.environment - zone = var.zone - resource = var.resource - delimiter = "-" - input_tags = var.tags - - id = join(local.delimiter, [local.tenant, local.environment, local.zone, local.resource]) - - tags_context = { - name = local.id - org = local.org - tenant = local.tenant - environment = local.environment - zone = local.zone - resource = local.resource - } - tags = merge(local.tags_context, local.input_tags) -} diff --git a/modules/aws-resource-tags/outputs.tf b/modules/aws-resource-tags/outputs.tf deleted file mode 100644 index 3dee145ad0..0000000000 --- a/modules/aws-resource-tags/outputs.tf +++ /dev/null @@ -1,9 +0,0 @@ -output "id" { - value = local.id - description = "aws resource id" -} - -output "tags" { - value = local.tags - description = "aws resource tags" -} diff --git a/modules/aws-resource-tags/variables.tf b/modules/aws-resource-tags/variables.tf deleted file mode 100644 index 494bd7df65..0000000000 --- a/modules/aws-resource-tags/variables.tf +++ /dev/null @@ -1,32 +0,0 @@ -variable "org" { - type = string - description = "tenant, which could be your organization name, e.g. aws'" - default = "" -} - -variable "tenant" { - type = string - description = "Account Name or unique account unique id e.g., apps or management or aws007" -} - -variable "environment" { - type = string - description = "zone, e.g. 'prod', 'preprod' " -} - -variable "zone" { - type = string - description = "Environment, e.g. 'load', 'zone', 'dev', 'uat'" -} - -variable "resource" { - type = string - description = "Solution name, e.g. 'app' or 'cluster'" - default = "" -} - -variable "tags" { - type = map(string) - default = {} - description = "Additional tags (e.g. 
`map('BusinessUnit`,`XYZ`)" -} diff --git a/modules/aws-resource-tags/versions.tf b/modules/aws-resource-tags/versions.tf deleted file mode 100644 index 429c0b36d0..0000000000 --- a/modules/aws-resource-tags/versions.tf +++ /dev/null @@ -1,3 +0,0 @@ -terraform { - required_version = ">= 1.0.0" -} diff --git a/modules/kubernetes-addons/argocd/locals.tf b/modules/kubernetes-addons/argocd/locals.tf index b504b911d1..af22762deb 100644 --- a/modules/kubernetes-addons/argocd/locals.tf +++ b/modules/kubernetes-addons/argocd/locals.tf @@ -18,10 +18,11 @@ locals { repository = "https://argoproj.github.io/argo-helm" version = "3.33.3" namespace = local.namespace - timeout = "1200" + timeout = 1200 create_namespace = true values = local.default_helm_values description = "The ArgoCD Helm Chart deployment configuration" + wait = false } helm_config = merge( diff --git a/modules/kubernetes-addons/aws-load-balancer-controller/locals.tf b/modules/kubernetes-addons/aws-load-balancer-controller/locals.tf index 4f7e557a02..0a6116b72d 100644 --- a/modules/kubernetes-addons/aws-load-balancer-controller/locals.tf +++ b/modules/kubernetes-addons/aws-load-balancer-controller/locals.tf @@ -6,7 +6,7 @@ locals { name = local.name chart = local.name repository = "https://aws.github.io/eks-charts" - version = "1.3.2" + version = "1.4.1" namespace = "kube-system" timeout = "1200" values = local.default_helm_values diff --git a/modules/kubernetes-addons/helm-addon/main.tf b/modules/kubernetes-addons/helm-addon/main.tf index 7014605635..eed17e8cfd 100644 --- a/modules/kubernetes-addons/helm-addon/main.tf +++ b/modules/kubernetes-addons/helm-addon/main.tf @@ -4,7 +4,7 @@ resource "helm_release" "addon" { repository = var.helm_config["repository"] chart = var.helm_config["chart"] version = var.helm_config["version"] - timeout = try(var.helm_config["timeout"], 300) + timeout = try(var.helm_config["timeout"], 1200) values = try(var.helm_config["values"], null) create_namespace = var.irsa_config != null ? false : try(var.helm_config["create_namespace"], false) namespace = var.helm_config["namespace"] diff --git a/modules/kubernetes-addons/ingress-nginx/locals.tf b/modules/kubernetes-addons/ingress-nginx/locals.tf index cf18aa8da9..6d9af6c831 100644 --- a/modules/kubernetes-addons/ingress-nginx/locals.tf +++ b/modules/kubernetes-addons/ingress-nginx/locals.tf @@ -11,6 +11,7 @@ locals { values = local.default_helm_values set = [] description = "The NGINX HelmChart Ingress Controller deployment configuration" + wait = false } default_helm_values = [templatefile("${path.module}/values.yaml", {})] diff --git a/modules/launch-templates/README.md b/modules/launch-templates/README.md index 69a967c856..189dc13919 100644 --- a/modules/launch-templates/README.md +++ b/modules/launch-templates/README.md @@ -23,7 +23,7 @@ module "launch_templates" { { device_name = "/dev/xvda" volume_type = "io1" - volume_size = "200" + volume_size = 200 iops = 100 # io1 and io2 -> Min: 100 IOPS, Max: 100000 IOPS (up to 1000 IOPS per GiB) } ] @@ -38,7 +38,7 @@ module "launch_templates" { { device_name = "/dev/xvda" volume_type = "io2" - volume_size = "200" + volume_size = 200 iops = 3000 #gp3-> Min: 3000 IOPS, Max: 16000 IOPS. } ] @@ -53,7 +53,7 @@ module "launch_templates" { { device_name = "/dev/xvda" volume_type = "gp3" - volume_size = "200" + volume_size = 200 iops = 3000 # gp3 -> Min: 3000 IOPS, Max: 16000 IOPS. 
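            # gp3 decouples performance from capacity: IOPS (3,000 default, up
            # to 16,000) and throughput (125 MiB/s default, up to 1,000) are
            # configured independently of volume_size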
throughput = 1000 # gp3 -> 125 to 1000 } @@ -70,7 +70,7 @@ module "launch_templates" { { device_name = "/dev/xvda" volume_type = "gp2" - volume_size = "200" + volume_size = 200 } ] }, @@ -84,7 +84,7 @@ module "launch_templates" { { device_name = "/dev/xvda" volume_type = "gp2" - volume_size = "200" + volume_size = 200 } ] }, diff --git a/modules/launch-templates/locals.tf b/modules/launch-templates/locals.tf index 975790661c..f48c77c79e 100644 --- a/modules/launch-templates/locals.tf +++ b/modules/launch-templates/locals.tf @@ -22,7 +22,7 @@ locals { block_device_mappings = { device_name = "/dev/xvda" volume_type = "gp3" # The volume type. Can be standard, gp2, gp3, io1, io2, sc1 or st1 (Default: gp3). - volume_size = "200" + volume_size = 200 delete_on_termination = true encrypted = true kms_key_id = "" diff --git a/outputs.tf b/outputs.tf index 005aac3176..b53b363480 100644 --- a/outputs.tf +++ b/outputs.tf @@ -41,6 +41,11 @@ output "eks_cluster_status" { value = module.aws_eks.cluster_status } +output "eks_cluster_version" { + description = "The Kubernetes version for the cluster" + value = module.aws_eks.cluster_version +} + #------------------------------- # Cluster Security Group #------------------------------- diff --git a/test/src/eks_blueprints_e2e_test.go b/test/src/eks_blueprints_e2e_test.go index d4fd156ffe..650e2dd724 100644 --- a/test/src/eks_blueprints_e2e_test.go +++ b/test/src/eks_blueprints_e2e_test.go @@ -41,9 +41,9 @@ var ( "awsRegion" : "us-west-2"}*/ destroyModules = []string{ - "module.kubernetes_addons", + "module.eks_blueprints_kubernetes_addons", "module.eks_blueprints", - "module.aws_vpc", + "module.vpc", "full_destroy", } @@ -121,9 +121,7 @@ func TestEksBlueprintsE2E(t *testing.T) { /*The path to where our Terraform code is located*/ TerraformDir: tempExampleFolder, Vars: map[string]interface{}{ - "tenant": "aws", - "environment": "terra", - "zone": "test", + "cluster_name": "aws-terra-test-eks", }, // VarFiles: []string{testCase.name + ".tfvars"}, // The var file paths to pass to Terraform commands using -var-file option. //BackendConfig: map[string]interface{}{ @@ -144,9 +142,7 @@ func TestEksBlueprintsE2E(t *testing.T) { /*The path to where our Terraform code is located*/ TerraformDir: tempExampleFolder, Vars: map[string]interface{}{ - "tenant": "aws", - "environment": "terra", - "zone": "test", + "cluster_name": "aws-terra-test-eks", }, // VarFiles: []string{testCase.name + ".tfvars"}, // The var file paths to pass to Terraform commands using -var-file option. //BackendConfig: map[string]interface{}{ diff --git a/variables.tf b/variables.tf index ba323ceeef..584e676452 100644 --- a/variables.tf +++ b/variables.tf @@ -1,42 +1,9 @@ -#------------------------------- -# EKS Cluster Labels -#------------------------------- -variable "org" { - type = string - description = "tenant, which could be your organization name, e.g. aws'" - default = "" -} - -variable "tenant" { - type = string - description = "Account name or unique account id e.g., apps or management or aws007" - default = "aws" -} - -variable "environment" { - type = string - default = "preprod" - description = "Environment area, e.g. prod or preprod " -} - -variable "zone" { - type = string - description = "zone, e.g. dev or qa or load or ops etc..." - default = "dev" -} - variable "tags" { type = map(string) default = {} description = "Additional tags (e.g. 
`map('BusinessUnit`,`XYZ`)" } -variable "terraform_version" { - type = string - default = "Terraform" - description = "Terraform version" -} - #------------------------------- # VPC Config for EKS Cluster #------------------------------- @@ -104,6 +71,7 @@ variable "cluster_security_group_tags" { type = map(string) default = {} } + #------------------------------- # EKS Cluster VPC Config #------------------------------- @@ -154,6 +122,7 @@ variable "cluster_encryption_config" { })) default = [] } + #------------------------------- # EKS Cluster Kubernetes Network Config #------------------------------- @@ -246,7 +215,6 @@ variable "iam_role_additional_policies" { type = list(string) default = [] } -#------------------------------- variable "enable_irsa" { description = "Determines whether to create an OpenID Connect Provider for EKS to enable IRSA" @@ -296,13 +264,12 @@ variable "enable_windows_support" { #------------------------------- # Worker Additional Variables #------------------------------- - variable "create_node_security_group" { description = "Determines whether to create a security group for the node groups or use the existing `node_security_group_id`" type = bool default = true } -#rules added by + variable "node_security_group_additional_rules" { description = "List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source" type = any