diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index 61e1a1083a81c..cd3cb152eeed5 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -628,6 +628,14 @@ func TestPrivateKindnet(t *testing.T) { runTestTerraformAWS(t) } +// TestKindnetIPv6 runs the test on an IPv6-only configuration with kindnet networking +func TestKindnetIPv6(t *testing.T) { + newIntegrationTest("minimal-ipv6.example.com", "minimal-ipv6-kindnet"). + withDefaultAddons30(). + withAddons(kindnetAddon). + runTestTerraformAWS(t) +} + // TestPrivateCalico runs the test on a configuration with private topology, calico networking func TestPrivateCalico(t *testing.T) { newIntegrationTest("privatecalico.example.com", "privatecalico"). diff --git a/pkg/model/components/kindnet.go b/pkg/model/components/kindnet.go index 556c3bfe6a9f1..61eb0ba2f765e 100644 --- a/pkg/model/components/kindnet.go +++ b/pkg/model/components/kindnet.go @@ -40,19 +40,26 @@ func (b *KindnetOptionsBuilder) BuildOptions(o *kops.Cluster) error { c.Version = "v1.8.0" } - // Kindnet should masquerade well known ranges if kops is not doing it if c.Masquerade == nil { - c.Masquerade = &kops.KindnetMasqueradeSpec{ - Enabled: fi.PtrTo(true), - } - if clusterSpec.Networking.NetworkCIDR != "" { - c.Masquerade.NonMasqueradeCIDRs = append(c.Masquerade.NonMasqueradeCIDRs, clusterSpec.Networking.NetworkCIDR) - } - if clusterSpec.Networking.PodCIDR != "" { - c.Masquerade.NonMasqueradeCIDRs = append(c.Masquerade.NonMasqueradeCIDRs, clusterSpec.Networking.PodCIDR) - } - if clusterSpec.Networking.ServiceClusterIPRange != "" { - c.Masquerade.NonMasqueradeCIDRs = append(c.Masquerade.NonMasqueradeCIDRs, clusterSpec.Networking.ServiceClusterIPRange) + c.Masquerade = &kops.KindnetMasqueradeSpec{} + if clusterSpec.IsIPv6Only() { + // Kindnet should NOT masquerade when IPv6 is used + c.Masquerade.Enabled = fi.PtrTo(false) + if o.GetCloudProvider() != kops.CloudProviderAWS { + c.NAT64 = 
fi.PtrTo(true) + } + } else { + // Kindnet should masquerade well known ranges if kops is not doing it + c.Masquerade.Enabled = fi.PtrTo(true) + if clusterSpec.Networking.NetworkCIDR != "" { + c.Masquerade.NonMasqueradeCIDRs = append(c.Masquerade.NonMasqueradeCIDRs, clusterSpec.Networking.NetworkCIDR) + } + if clusterSpec.Networking.PodCIDR != "" { + c.Masquerade.NonMasqueradeCIDRs = append(c.Masquerade.NonMasqueradeCIDRs, clusterSpec.Networking.PodCIDR) + } + if clusterSpec.Networking.ServiceClusterIPRange != "" { + c.Masquerade.NonMasqueradeCIDRs = append(c.Masquerade.NonMasqueradeCIDRs, clusterSpec.Networking.ServiceClusterIPRange) + } } } diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-ASGLifecycle_event_pattern b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-ASGLifecycle_event_pattern new file mode 100644 index 0000000000000..c8db9dbe9c41b --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-ASGLifecycle_event_pattern @@ -0,0 +1 @@ +{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceScheduledChange_event_pattern b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceScheduledChange_event_pattern new file mode 100644 index 0000000000000..fb4ea7defdc52 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceScheduledChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}} diff --git 
a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceStateChange_event_pattern b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceStateChange_event_pattern new file mode 100644 index 0000000000000..8c2916419dc7d --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceStateChange_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-SpotInterruption_event_pattern b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-SpotInterruption_event_pattern new file mode 100644 index 0000000000000..2d0e83b4164b2 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-SpotInterruption_event_pattern @@ -0,0 +1 @@ +{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_masters.minimal-ipv6.example.com_policy b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_masters.minimal-ipv6.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_masters.minimal-ipv6.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_nodes.minimal-ipv6.example.com_policy 
b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_nodes.minimal-ipv6.example.com_policy new file mode 100644 index 0000000000000..66d5de1d5ae1e --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_nodes.minimal-ipv6.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_policy_masters.minimal-ipv6.example.com_policy b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_policy_masters.minimal-ipv6.example.com_policy new file mode 100644 index 0000000000000..7c31f2de8c5eb --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_policy_masters.minimal-ipv6.example.com_policy @@ -0,0 +1,279 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal-ipv6.example.com/*" + }, + { + "Action": [ + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:GetObject", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-ipv6.example.com/backups/etcd/main/*" + }, + { + "Action": [ + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:GetObject", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal-ipv6.example.com/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + 
"s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + }, + { + "Action": [ + "route53:ChangeResourceRecordSets", + "route53:GetHostedZone", + "route53:ListResourceRecordSets" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:snapshot/*", + "arn:aws-test:ec2:*:*:volume/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:snapshot/*", + "arn:aws-test:ec2:*:*:volume/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": 
"minimal-ipv6.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:AssignIpv6Addresses", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:GetInstanceTypesFromInstanceRequirements", + "ec2:ModifyInstanceAttribute", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "iam:CreateServiceLinkedRole", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:CreateGrant", + "kms:Decrypt", + "kms:DescribeKey", + "kms:Encrypt", + "kms:GenerateDataKey*", + "kms:GenerateRandom", + "kms:ReEncrypt*", + "sqs:DeleteMessage", + "sqs:ReceiveMessage" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:CompleteLifecycleAction", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + 
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "minimal-ipv6.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "minimal-ipv6.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_policy_nodes.minimal-ipv6.example.com_policy b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_policy_nodes.minimal-ipv6.example.com_policy new file mode 100644 index 0000000000000..c1eeb04ab618c --- /dev/null +++ 
b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_iam_role_policy_nodes.minimal-ipv6.example.com_policy @@ -0,0 +1,32 @@ +{ + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:AssignIpv6Addresses", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:ModifyInstanceAttribute", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_key_pair_kubernetes.minimal-ipv6.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_key_pair_kubernetes.minimal-ipv6.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key new file mode 100644 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_key_pair_kubernetes.minimal-ipv6.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_launch_template_master-us-test-1a.masters.minimal-ipv6.example.com_user_data b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_launch_template_master-us-test-1a.masters.minimal-ipv6.example.com_user_data new file mode 100644 index 0000000000000..191f7aef93e20 --- /dev/null +++ 
b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_launch_template_master-us-test-1a.masters.minimal-ipv6.example.com_user_data @@ -0,0 +1,138 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + echo "== Downloading $1 with hash $2 from $3 ==" + local -r file="$1" + local -r hash="$2" + local -a urls + mapfile -t urls < <(split-commas "$3") + + if [[ -f "${file}" ]]; then + if ! 
validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "== Downloading ${url} using ${cmd} ==" + if ! (${cmd} "${url}"); then + echo "== Failed to download ${url} using ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Failed to validate hash for ${url} ==" + rm -f "${file}" + else + echo "== Downloaded ${url} with hash ${hash} ==" + return 0 + fi + done + done + + echo "== All downloads failed; sleeping before retrying ==" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum "${file}" | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo "$1" | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "== Running nodeup ==" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + 
+#################################################################################### + +/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured ==" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ClusterName: minimal-ipv6.example.com +ConfigBase: memfs://clusters.example.com/minimal-ipv6.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: ControlPlane +NodeupConfigHash: WcKFw+eRNcHdNeuSt3S6LA6Pcdi1dr+6pKaRH63i73s= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_launch_template_nodes.minimal-ipv6.example.com_user_data b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_launch_template_nodes.minimal-ipv6.example.com_user_data new file mode 100644 index 0000000000000..193a1cc8da9f1 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_launch_template_nodes.minimal-ipv6.example.com_user_data @@ -0,0 +1,161 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + 
INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + echo "== Downloading $1 with hash $2 from $3 ==" + local -r file="$1" + local -r hash="$2" + local -a urls + mapfile -t urls < <(split-commas "$3") + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "== Downloading ${url} using ${cmd} ==" + if ! (${cmd} "${url}"); then + echo "== Failed to download ${url} using ${cmd} ==" + continue + fi + if ! 
validate-hash "${file}" "${hash}"; then + echo "== Failed to validate hash for ${url} ==" + rm -f "${file}" + else + echo "== Downloaded ${url} with hash ${hash} ==" + return 0 + fi + done + done + + echo "== All downloads failed; sleeping before retrying ==" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum "${file}" | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo "$1" | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "== Running nodeup ==" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured ==" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ClusterName: minimal-ipv6.example.com +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + 
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + servers: + - https://kops-controller.internal.minimal-ipv6.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: Q7EVJVRXlMWWbEgd03Em1H2tzp2EMpJvTx1zFEAJ/pY= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000000..d366cde2d8a50 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,241 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: minimal-ipv6.example.com +spec: + api: + loadBalancer: + class: Network + type: Public + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + version: v1.38.1 + manageStorageClasses: true + nodeIPFamilies: + - ipv6 + - ipv4 + 
cloudControllerManager: + allocateNodeCIDRs: false + clusterName: minimal-ipv6.example.com + configureCloudRoutes: false + image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.31.0 + leaderElection: + leaderElect: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: memfs://clusters.example.com/minimal-ipv6.example.com + containerd: + logLevel: info + runc: + version: 1.2.4 + version: 1.7.24 + dnsZone: Z1AFAKE1ZON3YO + etcdClusters: + - backups: + backupStore: memfs://clusters.example.com/minimal-ipv6.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + manager: + backupRetentionDays: 90 + name: main + version: 3.5.17 + - backups: + backupStore: memfs://clusters.example.com/minimal-ipv6.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + manager: + backupRetentionDays: 90 + name: events + version: 3.5.17 + externalDns: + provider: dns-controller + iam: + legacy: false + keyStore: memfs://clusters.example.com/minimal-ipv6.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: '::' + cloudProvider: external + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.32.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + 
serviceAccountIssuer: https://api.internal.minimal-ipv6.example.com + serviceAccountJWKSURI: https://api.internal.minimal-ipv6.example.com/openid/v1/jwks + serviceClusterIPRange: fd00:5e4f:ce::/108 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: false + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: external + clusterName: minimal-ipv6.example.com + configureCloudRoutes: false + controllers: + - '*' + - -nodeipam + image: registry.k8s.io/kube-controller-manager:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.23.0 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: fd00:5e4f:ce::a + upstreamNameservers: + - fd00:ec2::253 + kubeProxy: + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.32.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: fd00:5e4f:ce::a + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.9 + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.32.0 + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: fd00:5e4f:ce::a + clusterDomain: cluster.local 
+ enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podInfraContainerImage: registry.k8s.io/pause:3.9 + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.minimal-ipv6.example.com + networkCIDR: 172.20.0.0/16 + networking: + kindnet: + fastPathThreshold: 0 + logLevel: 2 + masquerade: + enabled: false + version: v1.8.0 + nodeTerminationHandler: + cpuRequest: 50m + deleteSQSMsgIfNodeNotFound: false + enableRebalanceDraining: false + enableRebalanceMonitoring: false + enableScheduledEventDraining: true + enableSpotInterruptionDraining: true + enabled: true + excludeFromLoadBalancers: true + managedASGTag: aws-node-termination-handler/managed + memoryRequest: 64Mi + podTerminationGracePeriod: -1 + prometheusEnable: false + taintNode: false + version: v1.22.0 + nonMasqueradeCIDR: ::/0 + secretStore: memfs://clusters.example.com/minimal-ipv6.example.com/secrets + serviceClusterIPRange: fd00:5e4f:ce::/108 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - ipv6CIDR: 2001:db8:0:111::/64 + name: us-test-1a + type: Private + zone: us-test-1a + - ipv6CIDR: 2001:db8:0:112::/64 + name: us-test-1b + type: Private + zone: us-test-1b + - cidr: 172.20.32.0/19 + ipv6CIDR: 2001:db8:0:113::/64 + name: dualstack-us-test-1a + type: DualStack + zone: us-test-1a + - cidr: 172.20.64.0/19 + ipv6CIDR: 2001:db8:0:114::/64 + name: dualstack-us-test-1b + type: DualStack + zone: us-test-1b + - cidr: 172.20.0.0/22 + ipv6CIDR: 2001:db8:0:115::/64 + name: utility-us-test-1a + type: Utility + zone: us-test-1a + - cidr: 172.20.4.0/22 + ipv6CIDR: 2001:db8:0:116::/64 + name: utility-us-test-1b + type: Utility + zone: us-test-1b + topology: + dns: + type: Public diff --git 
a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000000000..432cd0d5554c5 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.5.17" +} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000000..432cd0d5554c5 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.5.17" +} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000000000..b7340298dcdd5 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.21.0-alpha.1 diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content new file mode 100644 index 0000000000000..3d7dd831fcb2c --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content @@ -0,0 +1,138 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: 
kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/minimal-ipv6.example.com/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.minimal-ipv6.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/control-plane=1 + --volume-tag=kubernetes.io/cluster/minimal-ipv6.example.com=owned > /tmp/pipe + 2>&1 + env: + - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION + value: 90d + image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20241012 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /opt + name: opt + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + initContainers: + - args: + - --target-dir=/opt/kops-utils/ + - --src=/ko-app/kops-utils-cp + command: + - /ko-app/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.31.0-beta.1 + name: kops-utils-cp + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.4.13 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:3.4.13-0 + name: init-etcd-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.5.17 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:3.5.17-0 + name: init-etcd-3-5-17 + 
resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.4.3 + - --src=/opt/etcd-v3.4.13/etcd + - --src=/opt/etcd-v3.4.13/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.31.0-beta.1 + name: init-etcd-symlinks-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.5.0 + - --target-dir=/opt/etcd-v3.5.1 + - --target-dir=/opt/etcd-v3.5.13 + - --target-dir=/opt/etcd-v3.5.3 + - --target-dir=/opt/etcd-v3.5.4 + - --target-dir=/opt/etcd-v3.5.6 + - --target-dir=/opt/etcd-v3.5.7 + - --target-dir=/opt/etcd-v3.5.9 + - --src=/opt/etcd-v3.5.17/etcd + - --src=/opt/etcd-v3.5.17/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.31.0-beta.1 + name: init-etcd-symlinks-3-5-17 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - emptyDir: {} + name: opt + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content new file mode 100644 index 0000000000000..3c81f11e74e1a --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content @@ -0,0 +1,138 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: 
etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/minimal-ipv6.example.com/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.minimal-ipv6.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/control-plane=1 + --volume-tag=kubernetes.io/cluster/minimal-ipv6.example.com=owned > /tmp/pipe + 2>&1 + env: + - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION + value: 90d + image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20241012 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /opt + name: opt + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + initContainers: + - args: + - --target-dir=/opt/kops-utils/ + - --src=/ko-app/kops-utils-cp + command: + - /ko-app/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.31.0-beta.1 + name: kops-utils-cp + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.4.13 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:3.4.13-0 + name: init-etcd-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.5.17 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: 
registry.k8s.io/etcd:3.5.17-0 + name: init-etcd-3-5-17 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.4.3 + - --src=/opt/etcd-v3.4.13/etcd + - --src=/opt/etcd-v3.4.13/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.31.0-beta.1 + name: init-etcd-symlinks-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.5.0 + - --target-dir=/opt/etcd-v3.5.1 + - --target-dir=/opt/etcd-v3.5.13 + - --target-dir=/opt/etcd-v3.5.3 + - --target-dir=/opt/etcd-v3.5.4 + - --target-dir=/opt/etcd-v3.5.6 + - --target-dir=/opt/etcd-v3.5.7 + - --target-dir=/opt/etcd-v3.5.9 + - --src=/opt/etcd-v3.5.17/etcd + - --src=/opt/etcd-v3.5.17/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.31.0-beta.1 + name: init-etcd-symlinks-3-5-17 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - emptyDir: {} + name: opt + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000000..21a36c9bb0d56 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + 
creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.31.0-beta.1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content new file mode 100644 index 0000000000000..3089f7d99c381 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content @@ -0,0 +1,236 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + k8s-app: aws-cloud-controller-manager + name: aws-cloud-controller-manager + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: aws-cloud-controller-manager + template: + metadata: + creationTimestamp: null + labels: + k8s-app: aws-cloud-controller-manager + kops.k8s.io/managed-by: kops + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + 
operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --allocate-node-cidrs=false + - --cluster-name=minimal-ipv6.example.com + - --configure-cloud-routes=false + - --leader-elect=true + - --v=2 + - --cloud-provider=aws + - --use-service-account-credentials=true + - --cloud-config=/etc/kubernetes/cloud.config + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.31.0 + imagePullPolicy: IfNotPresent + name: aws-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/kubernetes/cloud.config + name: cloudconfig + readOnly: true + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccountName: aws-cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node.kubernetes.io/not-ready + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - hostPath: + path: /etc/kubernetes/cloud.config + type: "" + name: cloudconfig + updateStrategy: + type: RollingUpdate + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: aws-cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: cloud-controller-manager:apiserver-authentication-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role 
+ name: extension-apiserver-authentication-reader +subjects: +- apiGroup: "" + kind: ServiceAccount + name: aws-cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: system:cloud-controller-manager +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - '*' +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +- apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - node-controller + - service-controller + - route-controller + resources: + - serviceaccounts/token + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: aws-cloud-controller.addons.k8s.io + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: +- apiGroup: "" + kind: 
ServiceAccount + name: aws-cloud-controller-manager + namespace: kube-system diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content new file mode 100644 index 0000000000000..83da63135eed5 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content @@ -0,0 +1,1146 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + +--- + +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + 
app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-attacher-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - patch +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-role +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + 
creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-provisioner-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - patch + - delete +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattributesclasses + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-resizer-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - patch +- 
apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattributesclasses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-external-snapshotter-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - watch + - update + - patch + - create +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list + - watch + - update + - patch + - create +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + - patch +- apiGroups: + - groupsnapshot.storage.k8s.io + resources: + - volumegroupsnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - groupsnapshot.storage.k8s.io + resources: + - volumegroupsnapshotcontents + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - groupsnapshot.storage.k8s.io + resources: + - volumegroupsnapshotcontents/status + verbs: + - update + - patch + +--- 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-attacher-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-attacher-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node-getter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-csi-node-role +subjects: +- kind: ServiceAccount + name: ebs-csi-node-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-provisioner-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-provisioner-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-resizer-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-resizer-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-snapshotter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ebs-external-snapshotter-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-leases-role + namespace: kube-system +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-leases-rolebinding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ebs-csi-leases-role +subjects: +- kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system + +--- + +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app: ebs-csi-controller + app.kubernetes.io/managed-by: kops + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + ports: + - name: metrics + port: 3301 + targetPort: 3301 + selector: + app: ebs-csi-controller + type: ClusterIP + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-node + namespace: kube-system +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-node + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + kops.k8s.io/managed-by: kops + spec: + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: Exists + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + - auto + - hybrid + - key: node.kubernetes.io/instance-type + operator: NotIn + values: + - a1.medium + - a1.large + - a1.xlarge + - a1.2xlarge + - a1.4xlarge + containers: + - args: + - node + - --endpoint=$(CSI_ENDPOINT) + - --csi-mount-point-prefix=/var/lib/kubelet/plugins/kubernetes.io/csi/ebs.csi.aws.com/ + - --logging-format=text + - --v=2 + env: + - name: AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE + value: IPv6 + - name: AWS_REGION + value: us-test-1 + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver:v1.38.1 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/aws-ebs-csi-driver + - pre-stop-hook + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + privileged: true + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: kubelet-dir + - mountPath: /csi + name: plugin-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=2 + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.12.0-eks-1-32-1 + imagePullPolicy: IfNotPresent + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - 
--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --mode=kubelet-registration-probe + initialDelaySeconds: 30 + periodSeconds: 90 + timeoutSeconds: 15 + name: node-driver-registrar + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - mountPath: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + name: probe-dir + - args: + - --csi-address=/csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.14.0-eks-1-32-1 + imagePullPolicy: IfNotPresent + name: liveness-probe + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + hostNetwork: false + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + securityContext: + fsGroup: 0 + runAsGroup: 0 + runAsNonRoot: false + runAsUser: 0 + serviceAccountName: ebs-csi-node-sa + terminationGracePeriodSeconds: 30 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet + type: Directory + name: kubelet-dir + - hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /dev + type: Directory + name: device-dir + - emptyDir: {} + name: probe-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + 
app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs-csi-controller + namespace: kube-system +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: ebs-csi-controller + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + kops.k8s.io/managed-by: kops + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + - auto + - hybrid + weight: 1 + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - ebs-csi-controller + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - controller + - --endpoint=$(CSI_ENDPOINT) + - --k8s-tag-cluster-id=minimal-ipv6.example.com + - --extra-tags=KubernetesCluster=minimal-ipv6.example.com + - --http-endpoint=0.0.0.0:3301 + - --batching=true + - --logging-format=text + - --v=5 + env: + - name: AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE + value: IPv6 + - name: AWS_REGION + value: us-test-1 + - name: CSI_ENDPOINT + value: 
unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: CSI_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: key_id + name: aws-secret + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: access_key + name: aws-secret + optional: true + - name: AWS_EC2_ENDPOINT + valueFrom: + configMapKeyRef: + key: endpoint + name: aws-meta + optional: true + image: public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver:v1.38.1 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + name: ebs-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + - containerPort: 3301 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --timeout=60s + - --csi-address=$(ADDRESS) + - --v=5 + - --feature-gates=Topology=true + - --extra-create-metadata + - --leader-election=true + - --default-fstype=ext4 + - --kube-api-qps=20 + - --kube-api-burst=100 + - --worker-threads=100 + - --retry-interval-max=30m + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v5.1.0-eks-1-32-1 + imagePullPolicy: IfNotPresent + name: csi-provisioner + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + 
volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --timeout=60s + - --csi-address=$(ADDRESS) + - --v=5 + - --leader-election=true + - --kube-api-qps=20 + - --kube-api-burst=100 + - --worker-threads=100 + - --retry-interval-max=5m + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v4.7.0-eks-1-32-1 + imagePullPolicy: IfNotPresent + name: csi-attacher + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --timeout=60s + - --csi-address=$(ADDRESS) + - --v=5 + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: public.ecr.aws/ebs-csi-driver/volume-modifier-for-k8s:v0.5.0 + imagePullPolicy: IfNotPresent + name: volumemodifier + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --timeout=60s + - --csi-address=$(ADDRESS) + - --v=5 + - --handle-volume-inuse-error=false + - --leader-election=true + - --kube-api-qps=20 + - --kube-api-burst=100 + - --workers=100 + - --retry-interval-max=30m + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-resizer:v1.12.0-eks-1-32-1 + imagePullPolicy: IfNotPresent + name: csi-resizer + resources: + limits: + memory: 256Mi + 
requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=/csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.14.0-eks-1-32-1 + imagePullPolicy: IfNotPresent + name: liveness-probe + resources: + limits: + memory: 256Mi + requests: + cpu: 10m + memory: 40Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /csi + name: socket-dir + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + fsGroup: 1000 + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: ebs-csi-controller-sa + tolerations: + - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/name: aws-ebs-csi-driver + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + volumes: + - emptyDir: {} + name: socket-dir + +--- + +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: aws-ebs-csi-driver.addons.k8s.io + app.kubernetes.io/component: csi-driver + app.kubernetes.io/instance: aws-ebs-csi-driver + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-ebs-csi-driver + app.kubernetes.io/version: v1.38.1 + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + name: ebs.csi.aws.com +spec: + attachRequired: true + podInfoOnMount: false diff --git 
a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content new file mode 100644 index 0000000000000..4cb5b7b3d4187 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content @@ -0,0 +1,121 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: c0e588106cb050f6e2e8b10082da29b67937142de68a58f5d28d611169263131 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: 4866e83e02b7f63ed0f85012e2dcd375d859653f4a3cd11d0d9d0bf87cf27f8c + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: f11ce2ded189d046ecbfb7aeb1a8bd0439c1e215e7b3ce7a149fca99ba8d7c30 + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.11 + manifest: node-termination-handler.aws/k8s-1.11.yaml + manifestHash: 
70fef25dcbf9ab956bb6f68a4041beafba76eea75071066fcb34459bc1c7035e + name: node-termination-handler.aws + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: admissionregistration.k8s.io + kind: MutatingWebhookConfiguration + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + - group: 
rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops + selector: + k8s-addon: node-termination-handler.aws + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 + - id: k8s-1.32 + manifest: networking.kindnet/k8s-1.32.yaml + manifestHash: 43ba729e334963108d9468ac9fe4ca08f3b99bc2ae19c4f830895438f453d684 + name: networking.kindnet + needsRollingUpdate: all + selector: + role.kubernetes.io/networking: "1" + version: 9.99.0 + - id: k8s-1.18 + manifest: aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml + manifestHash: a26b686d2e5ed0161da6ff083a66024102cd57a5ceb6471155fe814e16ed9710 + name: aws-cloud-controller.addons.k8s.io + selector: + k8s-addon: aws-cloud-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.17 + manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml + manifestHash: 1d351cede39021a1754c872160d01c17bd0f58e1f24f735c1248306e976af056 + name: aws-ebs-csi-driver.addons.k8s.io + selector: + k8s-addon: aws-ebs-csi-driver.addons.k8s.io + version: 9.99.0 diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..a14c12dc2c5ae --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,384 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops 
+ k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . 
fd00:ec2::253 { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + dns64 + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.11.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - 
key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: fd00:5e4f:ce::a + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + 
verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.8.9 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git 
a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..8b02cd43fab0a --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.31.0-beta.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.31.0-beta.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv6 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.31.0-beta.1 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: 
node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000000..7f92a8042cb54 --- /dev/null +++ 
b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,227 @@ +apiVersion: v1 +data: + config.yaml: | + {"clusterName":"minimal-ipv6.example.com","cloud":"aws","configBase":"memfs://clusters.example.com/minimal-ipv6.example.com","secretStore":"memfs://clusters.example.com/minimal-ipv6.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal-ipv6.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]},"enableCloudIPAM":true} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.31.0-beta.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal-ipv6.example.com + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.31.0-beta.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: 
kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + - name: KOPS_RUN_TOO_NEW_VERSION + value: "1" + image: registry.k8s.io/kops/kops-controller:1.31.0-beta.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - 
"" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content 
b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000000..36761e1c56255 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000000..4dcdce48b9ab9 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.kindnet-k8s-1.32_content 
b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.kindnet-k8s-1.32_content new file mode 100644 index 0000000000000..24565e94fc329 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.kindnet-k8s-1.32_content @@ -0,0 +1,164 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.kindnet + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: kindnet +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - patch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.kindnet + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: kindnet +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kindnet +subjects: +- kind: ServiceAccount + name: kindnet + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.kindnet + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: kindnet + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.kindnet + app: kindnet + app.kubernetes.io/managed-by: kops + k8s-app: kindnet + role.kubernetes.io/networking: "1" + tier: node + name: kindnet + namespace: kube-system +spec: + selector: + matchLabels: + app: kindnet + template: + 
metadata: + creationTimestamp: null + labels: + app: kindnet + k8s-app: kindnet + kops.k8s.io/managed-by: kops + tier: node + spec: + containers: + - args: + - /bin/kindnetd + - --hostname-override=$(NODE_NAME) + - --v=2 + - --dns-caching=true + - --nat64=false + - --fastpath-threshold=0 + - --masquerading=false + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: ghcr.io/aojea/kindnetd:v1.8.0 + name: kindnet-cni + resources: + requests: + cpu: 100m + memory: 50Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/cni/net.d + name: cni-cfg + - mountPath: /var/lib/cni-kindnet + name: var-lib-kindnet + hostNetwork: true + initContainers: + - command: + - sh + - -c + - cat /opt/cni/bin/cni-kindnet > /cni/cni-kindnet ; chmod +x /cni/cni-kindnet + image: ghcr.io/aojea/kindnetd:v1.8.0 + name: install-cni-bin + volumeMounts: + - mountPath: /cni + name: cni-bin + serviceAccountName: kindnet + tolerations: + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-bin + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: cni-cfg + - hostPath: + path: /var/lib/cni-kindnet + type: DirectoryOrCreate + name: var-lib-kindnet diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-node-termination-handler.aws-k8s-1.11_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-node-termination-handler.aws-k8s-1.11_content new file mode 100644 index 0000000000000..2b06150d67823 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-node-termination-handler.aws-k8s-1.11_content @@ -0,0 +1,285 @@ +apiVersion: v1 +kind: 
ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.22.0 + k8s-addon: node-termination-handler.aws + k8s-app: aws-node-termination-handler + name: aws-node-termination-handler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.22.0 + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - get +- apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create +- apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - get +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.22.0 + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: aws-node-termination-handler +subjects: +- kind: ServiceAccount + name: aws-node-termination-handler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/component: deployment + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + app.kubernetes.io/part-of: aws-node-termination-handler + app.kubernetes.io/version: v1.22.0 + k8s-addon: node-termination-handler.aws + k8s-app: aws-node-termination-handler + name: aws-node-termination-handler + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kubernetes.io/os: linux + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: deployment + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + k8s-app: aws-node-termination-handler + kops.k8s.io/managed-by: kops + kops.k8s.io/nth-mode: sqs + kubernetes.io/os: linux + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ENABLE_PROBES_SERVER + value: "true" + - name: PROBES_SERVER_PORT + value: "8080" + - name: PROBES_SERVER_ENDPOINT + value: /healthz + - name: LOG_LEVEL + value: info + - name: 
JSON_LOGGING + value: "true" + - name: LOG_FORMAT_VERSION + value: "2" + - name: ENABLE_PROMETHEUS_SERVER + value: "false" + - name: PROMETHEUS_SERVER_PORT + value: "9092" + - name: CHECK_TAG_BEFORE_DRAINING + value: "true" + - name: MANAGED_TAG + value: aws-node-termination-handler/managed + - name: USE_PROVIDER_ID + value: "true" + - name: DRY_RUN + value: "false" + - name: CORDON_ONLY + value: "false" + - name: TAINT_NODE + value: "false" + - name: EXCLUDE_FROM_LOAD_BALANCERS + value: "true" + - name: DELETE_LOCAL_DATA + value: "true" + - name: IGNORE_DAEMON_SETS + value: "true" + - name: POD_TERMINATION_GRACE_PERIOD + value: "-1" + - name: NODE_TERMINATION_GRACE_PERIOD + value: "120" + - name: EMIT_KUBERNETES_EVENTS + value: "true" + - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS + value: "-1" + - name: ENABLE_SQS_TERMINATION_DRAINING + value: "true" + - name: QUEUE_URL + value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-ipv6-example-com-nth + - name: DELETE_SQS_MSG_IF_NODE_NOT_FOUND + value: "false" + - name: WORKERS + value: "10" + image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.22.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + name: aws-node-termination-handler + ports: + - containerPort: 8080 + name: liveness-probe + protocol: TCP + - containerPort: 9092 + name: metrics + protocol: TCP + resources: + requests: + cpu: 50m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + securityContext: + fsGroup: 1000 + serviceAccountName: aws-node-termination-handler + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + topologySpreadConstraints: + - labelSelector: + 
matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: node-termination-handler.aws + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: aws-node-termination-handler + k8s-addon: node-termination-handler.aws + name: aws-node-termination-handler + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/instance: aws-node-termination-handler + app.kubernetes.io/name: aws-node-termination-handler + kops.k8s.io/nth-mode: sqs diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000000..bea3e88be3ce7 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_minimal-ipv6.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,118 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: 
storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-csi-1-21 +parameters: + encrypted: "true" + type: gp3 +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: 
storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000000..4d948f785f9ac --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,329 @@ +APIServerConfig: + API: + publicName: api.minimal-ipv6.example.com + ClusterDNSDomain: cluster.local + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: '::' + cloudProvider: external + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.32.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.minimal-ipv6.example.com + serviceAccountJWKSURI: https://api.internal.minimal-ipv6.example.com/openid/v1/jwks + serviceClusterIPRange: fd00:5e4f:ce::/108 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + 
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 5ad4965598773d56a37a8e8429c3dc3d86b4c5c26d8417ab333ae345c053dae2@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet + - 646d58f6d98ee670a71d9cdffbf6625aeea2849d567f214bc43a35f8ccb7bf70@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl + - 5035d7814c95cd3cedbc5efb447ef25a4942ef05caab2159746d55ce1698c74a@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/amd64/ecr-credential-provider-linux-amd64 + - 2503ce29ac445715ebe146073f45468153f9e28f45fa173cb060cfd9e735f563@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz,https://github.com/containernetworking/plugins/releases/download/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz + - 1a94f15139f37633f39e24f08a4071f4533b285df3cbee6478972d26147bcaef@https://github.com/containerd/containerd/releases/download/v1.7.24/containerd-1.7.24-linux-amd64.tar.gz + - e83565aa78ec8f52a4d2b4eb6c4ca262b74c5f6770c1f43670c3029c20175502@https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.amd64 + - 71aee9d987b7fad0ff2ade50b038ad7e2356324edc02c54045960a3521b3e6a7@https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-amd64.tar.gz + - d16a1ffb3938f5a19d5c8f45d363bd091ef89c0bc4d44ad16b933eede32fdcbb@https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz + - 
f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - bda9b2324c96693b38c41ecea051bab4c7c434be5683050b5e19025b50dbc0bf@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet + - ba4004f98f3d3a7b7d2954ff0a424caa2c2b06b78c17b1dccf2acc76a311a896@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl + - b3d567bda9e2996fc1fbd9d13506bd16763d3865b5c7b0b3c4b48c6088c04481@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/arm64/ecr-credential-provider-linux-arm64 + - f0f440b968ab50ad13d9d42d993ba98ec30b2ec666846f4ef1bddc7646a701cc@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz,https://github.com/containernetworking/plugins/releases/download/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz + - 420406d2b34ebb422ab3755fbeede59bf3bfcfccf5cfa584b558c93769d99064@https://github.com/containerd/containerd/releases/download/v1.7.24/containerd-1.7.24-linux-arm64.tar.gz + - 285f6c4c3de1d78d9f536a0299ae931219527b2ebd9ad89df5a1072896b7e82a@https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.arm64 + - d8df47708ca57b9cd7f498055126ba7dcfc811d9ba43aae1830c93a09e70e22d@https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-arm64.tar.gz + - 0b615cfa00c331fb9c4524f3d4058a61cc487b33a3436d1269e7832cf283f925@https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-arm64.tar.gz + - 
2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + 
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + 
LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END 
CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + 
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: minimal-ipv6.example.com +ControlPlaneConfig: + KubeControllerManager: + allocateNodeCIDRs: false + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: external + clusterName: minimal-ipv6.example.com + configureCloudRoutes: false + controllers: + - '*' + - -nodeipam + image: registry.k8s.io/kube-controller-manager:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + KubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 +DNSZone: Z1AFAKE1ZON3YO +EtcdClusterNames: +- main +- events +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +InstallCNIAssets: true +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + 
etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeProxy: + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.32.0 + logLevel: 2 +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: fd00:5e4f:ce::a + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + node-role.kubernetes.io/control-plane: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: registry.k8s.io/pause:3.9 + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/control-plane=:NoSchedule +KubernetesVersion: 1.32.0 +Networking: + kindnet: {} + nonMasqueradeCIDR: ::/0 + serviceClusterIPRange: fd00:5e4f:ce::/108 +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/minimal-ipv6.example.com/addons/bootstrap-channel.yaml +configStore: + keypairs: memfs://clusters.example.com/minimal-ipv6.example.com/pki + secrets: memfs://clusters.example.com/minimal-ipv6.example.com/secrets +containerdConfig: + logLevel: info + runc: + version: 1.2.4 + version: 1.7.24 +etcdManifests: +- memfs://clusters.example.com/minimal-ipv6.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/minimal-ipv6.example.com/manifests/etcd/events-master-us-test-1a.yaml +nodeIPFamilies: +- ipv6 +- ipv4 +staticManifests: +- key: kube-apiserver-healthcheck + path: 
manifests/static/kube-apiserver-healthcheck.yaml +usesLegacyGossip: false +usesNoneDNS: false diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000000..30add7ca0c532 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,63 @@ +Assets: + amd64: + - 5ad4965598773d56a37a8e8429c3dc3d86b4c5c26d8417ab333ae345c053dae2@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet + - 646d58f6d98ee670a71d9cdffbf6625aeea2849d567f214bc43a35f8ccb7bf70@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl + - 5035d7814c95cd3cedbc5efb447ef25a4942ef05caab2159746d55ce1698c74a@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/amd64/ecr-credential-provider-linux-amd64 + - 2503ce29ac445715ebe146073f45468153f9e28f45fa173cb060cfd9e735f563@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz,https://github.com/containernetworking/plugins/releases/download/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz + - 1a94f15139f37633f39e24f08a4071f4533b285df3cbee6478972d26147bcaef@https://github.com/containerd/containerd/releases/download/v1.7.24/containerd-1.7.24-linux-amd64.tar.gz + - e83565aa78ec8f52a4d2b4eb6c4ca262b74c5f6770c1f43670c3029c20175502@https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.amd64 + - 71aee9d987b7fad0ff2ade50b038ad7e2356324edc02c54045960a3521b3e6a7@https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-amd64.tar.gz + - 
d16a1ffb3938f5a19d5c8f45d363bd091ef89c0bc4d44ad16b933eede32fdcbb@https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-amd64.tar.gz + arm64: + - bda9b2324c96693b38c41ecea051bab4c7c434be5683050b5e19025b50dbc0bf@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet + - ba4004f98f3d3a7b7d2954ff0a424caa2c2b06b78c17b1dccf2acc76a311a896@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl,https://cdn.dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl + - b3d567bda9e2996fc1fbd9d13506bd16763d3865b5c7b0b3c4b48c6088c04481@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.27.1/linux/arm64/ecr-credential-provider-linux-arm64 + - f0f440b968ab50ad13d9d42d993ba98ec30b2ec666846f4ef1bddc7646a701cc@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz,https://github.com/containernetworking/plugins/releases/download/v1.6.1/cni-plugins-linux-arm64-v1.6.1.tgz + - 420406d2b34ebb422ab3755fbeede59bf3bfcfccf5cfa584b558c93769d99064@https://github.com/containerd/containerd/releases/download/v1.7.24/containerd-1.7.24-linux-arm64.tar.gz + - 285f6c4c3de1d78d9f536a0299ae931219527b2ebd9ad89df5a1072896b7e82a@https://github.com/opencontainers/runc/releases/download/v1.2.4/runc.arm64 + - d8df47708ca57b9cd7f498055126ba7dcfc811d9ba43aae1830c93a09e70e22d@https://github.com/containerd/nerdctl/releases/download/v1.7.4/nerdctl-1.7.4-linux-arm64.tar.gz + - 0b615cfa00c331fb9c4524f3d4058a61cc487b33a3436d1269e7832cf283f925@https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.29.0/crictl-v1.29.0-linux-arm64.tar.gz +CAs: {} +ClusterName: minimal-ipv6.example.com +Hooks: +- null +- null +InstallCNIAssets: true +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeProxy: + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.32.0 + logLevel: 2 +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + 
cloudProvider: external + clusterDNS: fd00:5e4f:ce::a + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.9 + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +KubernetesVersion: 1.32.0 +Networking: + kindnet: {} + nonMasqueradeCIDR: ::/0 + serviceClusterIPRange: fd00:5e4f:ce::/108 +UpdatePolicy: automatic +containerdConfig: + logLevel: info + runc: + version: 1.2.4 + version: 1.7.24 +usesLegacyGossip: false +usesNoneDNS: false diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_sqs_queue_minimal-ipv6-example-com-nth_policy b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_sqs_queue_minimal-ipv6-example-com-nth_policy new file mode 100644 index 0000000000000..79e46eb4932b9 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/data/aws_sqs_queue_minimal-ipv6-example-com-nth_policy @@ -0,0 +1,16 @@ +{ + "Statement": [ + { + "Action": "sqs:SendMessage", + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com", + "sqs.amazonaws.com" + ] + }, + "Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-ipv6-example-com-nth" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/id_rsa.pub b/tests/integration/update_cluster/minimal-ipv6-kindnet/id_rsa.pub new file mode 100755 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/in-v1alpha2.yaml b/tests/integration/update_cluster/minimal-ipv6-kindnet/in-v1alpha2.yaml new file mode 100644 index 0000000000000..afcfce451faae --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/in-v1alpha2.yaml @@ -0,0 +1,109 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-10T22:42:27Z" + name: minimal-ipv6.example.com +spec: + api: + loadBalancer: + type: Public + class: Network + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: true + cloudControllerManager: {} + cloudProvider: aws + configBase: memfs://clusters.example.com/minimal-ipv6.example.com + etcdClusters: + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + iam: {} + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: v1.32.0 + masterPublicName: api.minimal-ipv6.example.com + networkCIDR: 172.20.0.0/16 + networking: + kindnet: {} + nonMasqueradeCIDR: ::/0 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - ipv6CIDR: 2001:db8:0:111::/64 + name: us-test-1a + type: Private + zone: us-test-1a + - ipv6CIDR: 2001:db8:0:112::/64 + name: us-test-1b + type: Private + zone: us-test-1b + - cidr: 172.20.32.0/19 + ipv6CIDR: 2001:db8:0:113::/64 + name: dualstack-us-test-1a + type: DualStack + zone: us-test-1a + - cidr: 172.20.64.0/19 + ipv6CIDR: 2001:db8:0:114::/64 + name: dualstack-us-test-1b + type: DualStack + zone: us-test-1b + - cidr: 172.20.0.0/22 + ipv6CIDR: 2001:db8:0:115::/64 + name: utility-us-test-1a + type: Utility + zone: us-test-1a + - cidr: 172.20.4.0/22 + ipv6CIDR: 2001:db8:0:116::/64 + 
name: utility-us-test-1b + type: Utility + zone: us-test-1b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: nodes + labels: + kops.k8s.io/cluster: minimal-ipv6.example.com +spec: + associatePublicIp: true + image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 + machineType: t3.medium + maxSize: 2 + minSize: 2 + role: Node + subnets: + - us-test-1a + - us-test-1b + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-10T22:42:28Z" + name: master-us-test-1a + labels: + kops.k8s.io/cluster: minimal-ipv6.example.com +spec: + associatePublicIp: true + image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - dualstack-us-test-1a diff --git a/tests/integration/update_cluster/minimal-ipv6-kindnet/kubernetes.tf b/tests/integration/update_cluster/minimal-ipv6-kindnet/kubernetes.tf new file mode 100644 index 0000000000000..4b7fd12f764b0 --- /dev/null +++ b/tests/integration/update_cluster/minimal-ipv6-kindnet/kubernetes.tf @@ -0,0 +1,1378 @@ +locals { + cluster_name = "minimal-ipv6.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-ipv6-example-com.id] + master_security_group_ids = [aws_security_group.masters-minimal-ipv6-example-com.id] + masters_role_arn = aws_iam_role.masters-minimal-ipv6-example-com.arn + masters_role_name = aws_iam_role.masters-minimal-ipv6-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-ipv6-example-com.id] + node_security_group_ids = [aws_security_group.nodes-minimal-ipv6-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-minimal-ipv6-example-com.id, aws_subnet.us-test-1b-minimal-ipv6-example-com.id] + nodes_role_arn = aws_iam_role.nodes-minimal-ipv6-example-com.arn + nodes_role_name = 
aws_iam_role.nodes-minimal-ipv6-example-com.name + region = "us-test-1" + route_table_private-us-test-1a_id = aws_route_table.private-us-test-1a-minimal-ipv6-example-com.id + route_table_private-us-test-1b_id = aws_route_table.private-us-test-1b-minimal-ipv6-example-com.id + route_table_public_id = aws_route_table.minimal-ipv6-example-com.id + subnet_dualstack-us-test-1a_id = aws_subnet.dualstack-us-test-1a-minimal-ipv6-example-com.id + subnet_dualstack-us-test-1b_id = aws_subnet.dualstack-us-test-1b-minimal-ipv6-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-ipv6-example-com.id + subnet_us-test-1b_id = aws_subnet.us-test-1b-minimal-ipv6-example-com.id + subnet_utility-us-test-1a_id = aws_subnet.utility-us-test-1a-minimal-ipv6-example-com.id + subnet_utility-us-test-1b_id = aws_subnet.utility-us-test-1b-minimal-ipv6-example-com.id + vpc_cidr_block = aws_vpc.minimal-ipv6-example-com.cidr_block + vpc_id = aws_vpc.minimal-ipv6-example-com.id + vpc_ipv6_cidr_block = aws_vpc.minimal-ipv6-example-com.ipv6_cidr_block + vpc_ipv6_cidr_length = local.vpc_ipv6_cidr_block == "" ? 
null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) +} + +output "cluster_name" { + value = "minimal-ipv6.example.com" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-ipv6-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-minimal-ipv6-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-minimal-ipv6-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-minimal-ipv6-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-minimal-ipv6-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-minimal-ipv6-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-minimal-ipv6-example-com.id, aws_subnet.us-test-1b-minimal-ipv6-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-minimal-ipv6-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-minimal-ipv6-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_private-us-test-1a_id" { + value = aws_route_table.private-us-test-1a-minimal-ipv6-example-com.id +} + +output "route_table_private-us-test-1b_id" { + value = aws_route_table.private-us-test-1b-minimal-ipv6-example-com.id +} + +output "route_table_public_id" { + value = aws_route_table.minimal-ipv6-example-com.id +} + +output "subnet_dualstack-us-test-1a_id" { + value = aws_subnet.dualstack-us-test-1a-minimal-ipv6-example-com.id +} + +output "subnet_dualstack-us-test-1b_id" { + value = aws_subnet.dualstack-us-test-1b-minimal-ipv6-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-minimal-ipv6-example-com.id +} + +output "subnet_us-test-1b_id" { + value = aws_subnet.us-test-1b-minimal-ipv6-example-com.id +} + +output "subnet_utility-us-test-1a_id" { + value = 
aws_subnet.utility-us-test-1a-minimal-ipv6-example-com.id +} + +output "subnet_utility-us-test-1b_id" { + value = aws_subnet.utility-us-test-1b-minimal-ipv6-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.minimal-ipv6-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.minimal-ipv6-example-com.id +} + +output "vpc_ipv6_cidr_block" { + value = aws_vpc.minimal-ipv6-example-com.ipv6_cidr_block +} + +output "vpc_ipv6_cidr_length" { + value = local.vpc_ipv6_cidr_block == "" ? null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0]) +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-ipv6-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-minimal-ipv6-example-com.id + version = aws_launch_template.master-us-test-1a-masters-minimal-ipv6-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.minimal-ipv6.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "minimal-ipv6.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.minimal-ipv6.example.com" + } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + 
key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/control-plane" + propagate_at_launch = true + value = "1" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/minimal-ipv6.example.com" + propagate_at_launch = true + value = "owned" + } + target_group_arns = [aws_lb_target_group.tcp-minimal-ipv6-example--bne5ih.id] + vpc_zone_identifier = [aws_subnet.dualstack-us-test-1a-minimal-ipv6-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-minimal-ipv6-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-minimal-ipv6-example-com.id + version = aws_launch_template.nodes-minimal-ipv6-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.minimal-ipv6.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "minimal-ipv6.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.minimal-ipv6.example.com" + } + tag { + key = "aws-node-termination-handler/managed" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = 
"kubernetes.io/cluster/minimal-ipv6.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-ipv6-example-com.id, aws_subnet.us-test-1b-minimal-ipv6-example-com.id] +} + +resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-ipv6-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "master-us-test-1a-NTHLifecycleHook" +} + +resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" { + autoscaling_group_name = aws_autoscaling_group.nodes-minimal-ipv6-example-com.id + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" + name = "nodes-NTHLifecycleHook" +} + +resource "aws_cloudwatch_event_rule" "minimal-ipv6-example-com-ASGLifecycle" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-ASGLifecycle_event_pattern") + name = "minimal-ipv6.example.com-ASGLifecycle" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com-ASGLifecycle" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-ipv6-example-com-InstanceScheduledChange" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceScheduledChange_event_pattern") + name = "minimal-ipv6.example.com-InstanceScheduledChange" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com-InstanceScheduledChange" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-ipv6-example-com-InstanceStateChange" { + event_pattern = 
file("${path.module}/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-InstanceStateChange_event_pattern") + name = "minimal-ipv6.example.com-InstanceStateChange" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com-InstanceStateChange" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_rule" "minimal-ipv6-example-com-SpotInterruption" { + event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal-ipv6.example.com-SpotInterruption_event_pattern") + name = "minimal-ipv6.example.com-SpotInterruption" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com-SpotInterruption" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_cloudwatch_event_target" "minimal-ipv6-example-com-ASGLifecycle-Target" { + arn = aws_sqs_queue.minimal-ipv6-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-ipv6-example-com-ASGLifecycle.id +} + +resource "aws_cloudwatch_event_target" "minimal-ipv6-example-com-InstanceScheduledChange-Target" { + arn = aws_sqs_queue.minimal-ipv6-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-ipv6-example-com-InstanceScheduledChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-ipv6-example-com-InstanceStateChange-Target" { + arn = aws_sqs_queue.minimal-ipv6-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-ipv6-example-com-InstanceStateChange.id +} + +resource "aws_cloudwatch_event_target" "minimal-ipv6-example-com-SpotInterruption-Target" { + arn = aws_sqs_queue.minimal-ipv6-example-com-nth.arn + rule = aws_cloudwatch_event_rule.minimal-ipv6-example-com-SpotInterruption.id +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-ipv6-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = 
"us-test-1a.etcd-events.minimal-ipv6.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-ipv6-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "us-test-1a.etcd-main.minimal-ipv6.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_egress_only_internet_gateway" "minimal-ipv6-example-com" { + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_eip" "us-test-1a-minimal-ipv6-example-com" { + domain = "vpc" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "us-test-1a.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_eip" "us-test-1b-minimal-ipv6-example-com" { + domain = "vpc" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "us-test-1b.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "masters-minimal-ipv6-example-com" { + name = "masters.minimal-ipv6.example.com" + role = aws_iam_role.masters-minimal-ipv6-example-com.name + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "masters.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-minimal-ipv6-example-com" { + 
name = "nodes.minimal-ipv6.example.com" + role = aws_iam_role.nodes-minimal-ipv6-example-com.name + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "nodes.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_iam_role" "masters-minimal-ipv6-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal-ipv6.example.com_policy") + name = "masters.minimal-ipv6.example.com" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "masters.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-minimal-ipv6-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal-ipv6.example.com_policy") + name = "nodes.minimal-ipv6.example.com" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "nodes.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "masters-minimal-ipv6-example-com" { + name = "masters.minimal-ipv6.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal-ipv6.example.com_policy") + role = aws_iam_role.masters-minimal-ipv6-example-com.name +} + +resource "aws_iam_role_policy" "nodes-minimal-ipv6-example-com" { + name = "nodes.minimal-ipv6.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal-ipv6.example.com_policy") + role = aws_iam_role.nodes-minimal-ipv6-example-com.name +} + +resource "aws_internet_gateway" "minimal-ipv6-example-com" { + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_key_pair" "kubernetes-minimal-ipv6-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + key_name = 
"kubernetes.minimal-ipv6.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal-ipv6.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-minimal-ipv6-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-minimal-ipv6-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-minimal-ipv6-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "enabled" + http_put_response_hop_limit = 1 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.minimal-ipv6.example.com" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 1 + security_groups = [aws_security_group.masters-minimal-ipv6-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "master-us-test-1a.masters.minimal-ipv6.example.com" + "aws-node-termination-handler/managed" = "" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + 
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "master-us-test-1a.masters.minimal-ipv6.example.com" + "aws-node-termination-handler/managed" = "" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "master-us-test-1a.masters.minimal-ipv6.example.com" + "aws-node-termination-handler/managed" = "" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal-ipv6.example.com_user_data") +} + +resource "aws_launch_template" "nodes-minimal-ipv6-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size 
= 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-minimal-ipv6-example-com.id + } + image_id = "ami-12345678" + instance_type = "t3.medium" + key_name = aws_key_pair.kubernetes-minimal-ipv6-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "enabled" + http_put_response_hop_limit = 1 + http_tokens = "required" + } + monitoring { + enabled = false + } + name = "nodes.minimal-ipv6.example.com" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 1 + security_groups = [aws_security_group.nodes-minimal-ipv6-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "nodes.minimal-ipv6.example.com" + "aws-node-termination-handler/managed" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "nodes.minimal-ipv6.example.com" + "aws-node-termination-handler/managed" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "nodes.minimal-ipv6.example.com" + "aws-node-termination-handler/managed" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + user_data = 
filebase64("${path.module}/data/aws_launch_template_nodes.minimal-ipv6.example.com_user_data") +} + +resource "aws_lb" "api-minimal-ipv6-example-com" { + enable_cross_zone_load_balancing = false + internal = false + ip_address_type = "dualstack" + load_balancer_type = "network" + name = "api-minimal-ipv6-example--jhj9te" + security_groups = [aws_security_group.api-elb-minimal-ipv6-example-com.id] + subnet_mapping { + subnet_id = aws_subnet.utility-us-test-1a-minimal-ipv6-example-com.id + } + subnet_mapping { + subnet_id = aws_subnet.utility-us-test-1b-minimal-ipv6-example-com.id + } + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "api.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_lb_listener" "api-minimal-ipv6-example-com-443" { + default_action { + target_group_arn = aws_lb_target_group.tcp-minimal-ipv6-example--bne5ih.id + type = "forward" + } + load_balancer_arn = aws_lb.api-minimal-ipv6-example-com.id + port = 443 + protocol = "TCP" +} + +resource "aws_lb_target_group" "tcp-minimal-ipv6-example--bne5ih" { + connection_termination = "true" + deregistration_delay = "30" + health_check { + healthy_threshold = 2 + interval = 10 + protocol = "TCP" + unhealthy_threshold = 2 + } + name = "tcp-minimal-ipv6-example--bne5ih" + port = 443 + protocol = "TCP" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "tcp-minimal-ipv6-example--bne5ih" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_nat_gateway" "us-test-1a-minimal-ipv6-example-com" { + allocation_id = aws_eip.us-test-1a-minimal-ipv6-example-com.id + subnet_id = aws_subnet.utility-us-test-1a-minimal-ipv6-example-com.id + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "us-test-1a.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_nat_gateway" 
"us-test-1b-minimal-ipv6-example-com" { + allocation_id = aws_eip.us-test-1b-minimal-ipv6-example-com.id + subnet_id = aws_subnet.utility-us-test-1b-minimal-ipv6-example-com.id + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "us-test-1b.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.minimal-ipv6-example-com.id + route_table_id = aws_route_table.minimal-ipv6-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.minimal-ipv6-example-com.id + route_table_id = aws_route_table.minimal-ipv6-example-com.id +} + +resource "aws_route" "route-private-us-test-1a-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.us-test-1a-minimal-ipv6-example-com.id + route_table_id = aws_route_table.private-us-test-1a-minimal-ipv6-example-com.id +} + +resource "aws_route" "route-private-us-test-1a-64_ff9b__--96" { + destination_ipv6_cidr_block = "64:ff9b::/96" + nat_gateway_id = aws_nat_gateway.us-test-1a-minimal-ipv6-example-com.id + route_table_id = aws_route_table.private-us-test-1a-minimal-ipv6-example-com.id +} + +resource "aws_route" "route-private-us-test-1a-__--0" { + destination_ipv6_cidr_block = "::/0" + egress_only_gateway_id = aws_egress_only_internet_gateway.minimal-ipv6-example-com.id + route_table_id = aws_route_table.private-us-test-1a-minimal-ipv6-example-com.id +} + +resource "aws_route" "route-private-us-test-1b-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.us-test-1b-minimal-ipv6-example-com.id + route_table_id = aws_route_table.private-us-test-1b-minimal-ipv6-example-com.id +} + +resource "aws_route" "route-private-us-test-1b-64_ff9b__--96" { + destination_ipv6_cidr_block = "64:ff9b::/96" + nat_gateway_id = 
aws_nat_gateway.us-test-1b-minimal-ipv6-example-com.id + route_table_id = aws_route_table.private-us-test-1b-minimal-ipv6-example-com.id +} + +resource "aws_route" "route-private-us-test-1b-__--0" { + destination_ipv6_cidr_block = "::/0" + egress_only_gateway_id = aws_egress_only_internet_gateway.minimal-ipv6-example-com.id + route_table_id = aws_route_table.private-us-test-1b-minimal-ipv6-example-com.id +} + +resource "aws_route53_record" "api-minimal-ipv6-example-com" { + alias { + evaluate_target_health = false + name = aws_lb.api-minimal-ipv6-example-com.dns_name + zone_id = aws_lb.api-minimal-ipv6-example-com.zone_id + } + name = "api.minimal-ipv6.example.com" + type = "A" + zone_id = "/hostedzone/Z1AFAKE1ZON3YO" +} + +resource "aws_route53_record" "api-minimal-ipv6-example-com-AAAA" { + alias { + evaluate_target_health = false + name = aws_lb.api-minimal-ipv6-example-com.dns_name + zone_id = aws_lb.api-minimal-ipv6-example-com.zone_id + } + name = "api.minimal-ipv6.example.com" + type = "AAAA" + zone_id = "/hostedzone/Z1AFAKE1ZON3YO" +} + +resource "aws_route_table" "minimal-ipv6-example-com" { + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_route_table" "private-us-test-1a-minimal-ipv6-example-com" { + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "private-us-test-1a.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + "kubernetes.io/kops/role" = "private-us-test-1a" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_route_table" "private-us-test-1b-minimal-ipv6-example-com" { + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "private-us-test-1b.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + 
"kubernetes.io/kops/role" = "private-us-test-1b" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_route_table_association" "private-dualstack-us-test-1a-minimal-ipv6-example-com" { + route_table_id = aws_route_table.private-us-test-1a-minimal-ipv6-example-com.id + subnet_id = aws_subnet.dualstack-us-test-1a-minimal-ipv6-example-com.id +} + +resource "aws_route_table_association" "private-dualstack-us-test-1b-minimal-ipv6-example-com" { + route_table_id = aws_route_table.private-us-test-1b-minimal-ipv6-example-com.id + subnet_id = aws_subnet.dualstack-us-test-1b-minimal-ipv6-example-com.id +} + +resource "aws_route_table_association" "private-us-test-1a-minimal-ipv6-example-com" { + route_table_id = aws_route_table.private-us-test-1a-minimal-ipv6-example-com.id + subnet_id = aws_subnet.us-test-1a-minimal-ipv6-example-com.id +} + +resource "aws_route_table_association" "private-us-test-1b-minimal-ipv6-example-com" { + route_table_id = aws_route_table.private-us-test-1b-minimal-ipv6-example-com.id + subnet_id = aws_subnet.us-test-1b-minimal-ipv6-example-com.id +} + +resource "aws_route_table_association" "utility-us-test-1a-minimal-ipv6-example-com" { + route_table_id = aws_route_table.minimal-ipv6-example-com.id + subnet_id = aws_subnet.utility-us-test-1a-minimal-ipv6-example-com.id +} + +resource "aws_route_table_association" "utility-us-test-1b-minimal-ipv6-example-com" { + route_table_id = aws_route_table.minimal-ipv6-example-com.id + subnet_id = aws_subnet.utility-us-test-1b-minimal-ipv6-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/minimal-ipv6.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = 
file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/minimal-ipv6.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "clusters.example.com/minimal-ipv6.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "clusters.example.com/minimal-ipv6.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") + key = "clusters.example.com/minimal-ipv6.example.com/manifests/etcd/events-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") + key = "clusters.example.com/minimal-ipv6.example.com/manifests/etcd/main-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/minimal-ipv6.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource 
"aws_s3_object" "minimal-ipv6-example-com-addons-aws-cloud-controller-addons-k8s-io-k8s-1-18" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-aws-ebs-csi-driver-addons-k8s-io-k8s-1-17" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-bootstrap_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = 
"clusters.example.com/minimal-ipv6.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-networking-kindnet-k8s-1-32" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-networking.kindnet-k8s-1.32_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/networking.kindnet/k8s-1.32.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-node-termination-handler-aws-k8s-1-11" { + bucket = "testingBucket" + content = 
file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-node-termination-handler.aws-k8s-1.11_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/node-termination-handler.aws/k8s-1.11.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "minimal-ipv6-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_minimal-ipv6.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/minimal-ipv6.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/minimal-ipv6.example.com/igconfig/control-plane/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "clusters.example.com/minimal-ipv6.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "api-elb-minimal-ipv6-example-com" { + description = "Security group for api ELB" + name = "api-elb.minimal-ipv6.example.com" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "api-elb.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_security_group" "masters-minimal-ipv6-example-com" { + description = "Security group for masters" + name = "masters.minimal-ipv6.example.com" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = 
"masters.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_security_group" "nodes-minimal-ipv6-example-com" { + description = "Security group for nodes" + name = "nodes.minimal-ipv6.example.com" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "nodes.minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-ipv6-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-ipv6-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-api-elb-minimal-ipv6-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-masters-minimal-ipv6-example-com" { + from_port = 22 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-nodes-minimal-ipv6-example-com" { + from_port = 22 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" 
"from-__--0-ingress-tcp-443to443-api-elb-minimal-ipv6-example-com" { + from_port = 443 + ipv6_cidr_blocks = ["::/0"] + protocol = "tcp" + security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "from-api-elb-minimal-ipv6-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-api-elb-minimal-ipv6-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-minimal-ipv6-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-minimal-ipv6-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-minimal-ipv6-example-com-ingress-all-0to0-masters-minimal-ipv6-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-minimal-ipv6-example-com-ingress-all-0to0-nodes-minimal-ipv6-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + source_security_group_id = 
aws_security_group.masters-minimal-ipv6-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-ipv6-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-ipv6-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-ipv6-example-com-ingress-all-0to0-nodes-minimal-ipv6-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-ipv6-example-com-ingress-tcp-1to2379-masters-minimal-ipv6-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-ipv6-example-com-ingress-tcp-2382to4000-masters-minimal-ipv6-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-ipv6-example-com-ingress-tcp-4003to65535-masters-minimal-ipv6-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + 
source_security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-minimal-ipv6-example-com-ingress-udp-1to65535-masters-minimal-ipv6-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.nodes-minimal-ipv6-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "https-elb-to-master" { + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-cp-to-elb" { + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-elb-to-cp" { + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.masters-minimal-ipv6-example-com.id + source_security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmpv6-pmtu-api-elb-__--0" { + from_port = -1 + ipv6_cidr_blocks = ["::/0"] + protocol = "icmpv6" + security_group_id = aws_security_group.api-elb-minimal-ipv6-example-com.id + to_port = -1 + type = "ingress" +} + +resource "aws_sqs_queue" "minimal-ipv6-example-com-nth" { + message_retention_seconds = 300 + name = 
"minimal-ipv6-example-com-nth" + policy = file("${path.module}/data/aws_sqs_queue_minimal-ipv6-example-com-nth_policy") + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6-example-com-nth" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_subnet" "dualstack-us-test-1a-minimal-ipv6-example-com" { + assign_ipv6_address_on_creation = true + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + enable_resource_name_dns_a_record_on_launch = true + enable_resource_name_dns_aaaa_record_on_launch = true + ipv6_cidr_block = "2001:db8:0:113::/64" + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "dualstack-us-test-1a.minimal-ipv6.example.com" + "SubnetType" = "DualStack" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_subnet" "dualstack-us-test-1b-minimal-ipv6-example-com" { + assign_ipv6_address_on_creation = true + availability_zone = "us-test-1b" + cidr_block = "172.20.64.0/19" + enable_resource_name_dns_a_record_on_launch = true + enable_resource_name_dns_aaaa_record_on_launch = true + ipv6_cidr_block = "2001:db8:0:114::/64" + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "dualstack-us-test-1b.minimal-ipv6.example.com" + "SubnetType" = "DualStack" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_subnet" "us-test-1a-minimal-ipv6-example-com" { + assign_ipv6_address_on_creation = true + availability_zone = "us-test-1a" + enable_dns64 = true + enable_resource_name_dns_aaaa_record_on_launch = true + ipv6_cidr_block = "2001:db8:0:111::/64" + ipv6_native = true + 
private_dns_hostname_type_on_launch = "resource-name" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "us-test-1a.minimal-ipv6.example.com" + "SubnetType" = "Private" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_subnet" "us-test-1b-minimal-ipv6-example-com" { + assign_ipv6_address_on_creation = true + availability_zone = "us-test-1b" + enable_dns64 = true + enable_resource_name_dns_aaaa_record_on_launch = true + ipv6_cidr_block = "2001:db8:0:112::/64" + ipv6_native = true + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "us-test-1b.minimal-ipv6.example.com" + "SubnetType" = "Private" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_subnet" "utility-us-test-1a-minimal-ipv6-example-com" { + assign_ipv6_address_on_creation = true + availability_zone = "us-test-1a" + cidr_block = "172.20.0.0/22" + enable_resource_name_dns_a_record_on_launch = true + enable_resource_name_dns_aaaa_record_on_launch = true + ipv6_cidr_block = "2001:db8:0:115::/64" + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "utility-us-test-1a.minimal-ipv6.example.com" + "SubnetType" = "Utility" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_subnet" "utility-us-test-1b-minimal-ipv6-example-com" { + assign_ipv6_address_on_creation = true + availability_zone = "us-test-1b" + cidr_block = "172.20.4.0/22" + enable_resource_name_dns_a_record_on_launch = true + enable_resource_name_dns_aaaa_record_on_launch = true + ipv6_cidr_block = "2001:db8:0:116::/64" + private_dns_hostname_type_on_launch = "resource-name" + tags = { + "KubernetesCluster" 
= "minimal-ipv6.example.com" + "Name" = "utility-us-test-1b.minimal-ipv6.example.com" + "SubnetType" = "Utility" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + } + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +resource "aws_vpc" "minimal-ipv6-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "minimal-ipv6-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "minimal-ipv6.example.com" + "Name" = "minimal-ipv6.example.com" + "kubernetes.io/cluster/minimal-ipv6.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "minimal-ipv6-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.minimal-ipv6-example-com.id + vpc_id = aws_vpc.minimal-ipv6-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 5.0.0" + } + } +} diff --git a/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-bootstrap_content index d9fe5ff8faf24..39e65c8361e8f 100644 --- a/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-bootstrap_content +++ b/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-bootstrap_content @@ -99,7 +99,7 @@ spec: version: 9.99.0 - id: k8s-1.32 manifest: networking.kindnet/k8s-1.32.yaml - manifestHash: 
fa9023e8e738acb584c86cfec376cab6ace06f314e896cdcc62a9a214b20eb11 + manifestHash: 6f423b54cac7857cdd53f1998b399e3a029e675569c7ac6ae7eaf3e0979095f1 name: networking.kindnet needsRollingUpdate: all selector: diff --git a/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-networking.kindnet-k8s-1.32_content b/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-networking.kindnet-k8s-1.32_content index a302a14fd4688..48b8c3c47d6d5 100644 --- a/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-networking.kindnet-k8s-1.32_content +++ b/tests/integration/update_cluster/privatekindnet/data/aws_s3_object_privatekindnet.example.com-addons-networking.kindnet-k8s-1.32_content @@ -104,6 +104,8 @@ spec: - /bin/kindnetd - --hostname-override=$(NODE_NAME) - --v=2 + - --dns-caching=true + - --nat64=false - --fastpath-threshold=0 - --masquerading=true - --no-masquerade-cidr=172.20.0.0/16,100.96.0.0/11,100.64.0.0/13 diff --git a/upup/models/cloudup/resources/addons/networking.kindnet/k8s-1.32.yaml.template b/upup/models/cloudup/resources/addons/networking.kindnet/k8s-1.32.yaml.template index 21498dac04246..e3f7542d1bc97 100644 --- a/upup/models/cloudup/resources/addons/networking.kindnet/k8s-1.32.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.kindnet/k8s-1.32.yaml.template @@ -105,35 +105,25 @@ spec: args: - /bin/kindnetd - --hostname-override=$(NODE_NAME) - {{- if .Networking.Kindnet.LogLevel }} - --v={{ .Networking.Kindnet.LogLevel }} - {{- end }} - {{- if .Networking.Kindnet.NetworkPolicies }} + {{ if .Networking.Kindnet.NetworkPolicies }} - --network-policy={{ .Networking.Kindnet.NetworkPolicies }} - {{- end }} - {{- if .Networking.Kindnet.AdminNetworkPolicies }} + {{ end }} + {{ if .Networking.Kindnet.AdminNetworkPolicies }} - --admin-network-policy={{ .Networking.Kindnet.AdminNetworkPolicies }} - {{- end }} - 
{{- if .Networking.Kindnet.BaselineAdminNetworkPolicies }} + {{ end }} + {{ if .Networking.Kindnet.BaselineAdminNetworkPolicies }} - --admin-network-policy={{ .Networking.Kindnet.BaselineAdminNetworkPolicies }} - {{- end }} - {{- if .Networking.Kindnet.DNSCaching }} - - --dns-caching={{ .Networking.Kindnet.DNSCaching }} - {{- end }} - {{- if .Networking.Kindnet.NAT64 }} - - --nat64={{ .Networking.Kindnet.NAT64 }} - {{- end }} - {{- if .Networking.Kindnet.FastPathThreshold }} + {{ end }} + - --dns-caching={{ WithDefaultBool .Networking.Kindnet.DNSCaching true }} + - --nat64={{ WithDefaultBool .Networking.Kindnet.NAT64 false }} - --fastpath-threshold={{ .Networking.Kindnet.FastPathThreshold }} - {{- end }} {{- if .Networking.Kindnet.Masquerade }} - {{- if .Networking.Kindnet.Masquerade.Enabled }} - - --masquerading={{ .Networking.Kindnet.Masquerade.Enabled }} - {{- end }} - {{- if .Networking.Kindnet.Masquerade.NonMasqueradeCIDRs }} + - --masquerading={{ WithDefaultBool .Networking.Kindnet.Masquerade.Enabled true }} + {{ if .Networking.Kindnet.Masquerade.NonMasqueradeCIDRs }} - --no-masquerade-cidr={{ range $index, $element := .Networking.Kindnet.Masquerade.NonMasqueradeCIDRs }}{{if $index}},{{end}}{{$element}}{{end}} {{- end }} - {{- end }} + {{ end }} env: - name: HOST_IP valueFrom: