Skip to content

Commit

Permalink
Merge pull request kubernetes-sigs#808 from dgrisonnet/e2e-ha
Browse files Browse the repository at this point in the history
Run e2e tests with high availability configuration
  • Loading branch information
k8s-ci-robot authored Sep 13, 2021
2 parents e7016ff + 4fefc3b commit 2586939
Show file tree
Hide file tree
Showing 7 changed files with 151 additions and 96 deletions.
9 changes: 8 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,6 @@ test-cli: container
.PHONY: test-e2e
test-e2e: test-e2e-1.21


.PHONY: test-e2e-all
test-e2e-all: test-e2e-1.21 test-e2e-1.20 test-e2e-1.19

Expand All @@ -154,6 +153,14 @@ test-e2e-1.20:
test-e2e-1.19:
NODE_IMAGE=kindest/node:v1.19.11@sha256:7664f21f9cb6ba2264437de0eb3fe99f201db7a3ac72329547ec4373ba5f5911 ./test/test-e2e.sh

# Run the default e2e suite (test-e2e) against a high-availability
# cluster; HIGH_AVAILABILITY is consumed by test/test-e2e.sh.
.PHONY: test-e2e-ha
test-e2e-ha:
	HIGH_AVAILABILITY=true $(MAKE) test-e2e

# Same as test-e2e-all, but with the high-availability configuration
# for every tested Kubernetes version.
.PHONY: test-e2e-ha-all
test-e2e-ha-all:
	HIGH_AVAILABILITY=true $(MAKE) test-e2e-all

# Static analysis
# ---------------

Expand Down
16 changes: 16 additions & 0 deletions manifests/test-ha/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Kustomization used by the e2e tests when running in high-availability
# mode: it layers test-only tweaks on top of the HA manifests and swaps
# the released image for the locally built staging image.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../high-availability
# JSON 6902 patch (see patch.yaml) applied to the metrics-server
# Deployment to add e2e-only container settings.
patchesJson6902:
- target:
    group: apps
    version: v1
    kind: Deployment
    name: metrics-server
    namespace: kube-system
  path: patch.yaml
# Use the staging image built for the test run instead of the released
# k8s.gcr.io image; "master" matches the tag skaffold builds locally.
images:
- name: k8s.gcr.io/metrics-server/metrics-server
  newName: gcr.io/k8s-staging-metrics-server/metrics-server
  newTag: master
6 changes: 6 additions & 0 deletions manifests/test-ha/patch.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# JSON 6902 patch for the metrics-server Deployment, applied only in the
# e2e test overlay (referenced from kustomization.yaml in this directory).
# Skip kubelet TLS verification — presumably because kind node kubelets
# serve self-signed certificates; TODO confirm against test environment.
- op: add
  path: /spec/template/spec/containers/0/args/-
  value: --kubelet-insecure-tls
# Never pull the image: the test image is expected to already be loaded
# onto the cluster nodes by the test tooling.
- op: add
  path: /spec/template/spec/containers/0/imagePullPolicy
  value: Never
6 changes: 3 additions & 3 deletions skaffold.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,10 @@ build:
deploy:
kustomize:
paths:
- manifests/test
- manifests/test
profiles:
- name: high-availability
- name: test-ha
deploy:
kustomize:
paths:
- manifests/high-availability
- manifests/test-ha
185 changes: 95 additions & 90 deletions test/e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -189,10 +189,11 @@ var _ = Describe("MetricsServer", func() {
Expect(usage.Memory().Value()/1024/1024).NotTo(Equal(0), "Memory of Container %q should not be equal zero", ms.Containers[1].Name)
})
It("passes readyz probe", func() {
msPod := mustGetMetricsServerPod(client)
Expect(msPod.Spec.Containers).To(HaveLen(1), "Expected only one container in Metrics Server pod")
resp := mustProxyContainerProbe(restConfig, msPod.Namespace, msPod.Name, msPod.Spec.Containers[0], msPod.Spec.Containers[0].ReadinessProbe)
diff := cmp.Diff(string(resp), `[+]ping ok
msPods := mustGetMetricsServerPods(client)
for _, pod := range msPods {
Expect(pod.Spec.Containers).To(HaveLen(1), "Expected only one container in Metrics Server pod")
resp := mustProxyContainerProbe(restConfig, pod.Namespace, pod.Name, pod.Spec.Containers[0], pod.Spec.Containers[0].ReadinessProbe)
diff := cmp.Diff(string(resp), `[+]ping ok
[+]log ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]informer-sync ok
Expand All @@ -202,100 +203,105 @@ var _ = Describe("MetricsServer", func() {
[+]shutdown ok
readyz check passed
`)
Expect(diff == "").To(BeTrue(), "Unexpected response %s", diff)
Expect(diff == "").To(BeTrue(), "Unexpected response %s", diff)
}
})
It("passes livez probe", func() {
msPod := mustGetMetricsServerPod(client)
Expect(msPod.Spec.Containers).To(HaveLen(1), "Expected only one container in Metrics Server pod")
resp := mustProxyContainerProbe(restConfig, msPod.Namespace, msPod.Name, msPod.Spec.Containers[0], msPod.Spec.Containers[0].LivenessProbe)
diff := cmp.Diff(string(resp), `[+]ping ok
msPods := mustGetMetricsServerPods(client)
for _, pod := range msPods {
Expect(pod.Spec.Containers).To(HaveLen(1), "Expected only one container in Metrics Server pod")
resp := mustProxyContainerProbe(restConfig, pod.Namespace, pod.Name, pod.Spec.Containers[0], pod.Spec.Containers[0].LivenessProbe)
diff := cmp.Diff(string(resp), `[+]ping ok
[+]log ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/max-in-flight-filter ok
[+]metric-collection-timely ok
[+]metadata-informer-sync ok
livez check passed
`)
Expect(diff == "").To(BeTrue(), "Unexpected response %s", diff)
Expect(diff == "").To(BeTrue(), "Unexpected response %s", diff)
}
})
It("exposes prometheus metrics", func() {
msPod := mustGetMetricsServerPod(client)
resp, err := proxyRequestToPod(restConfig, msPod.Namespace, msPod.Name, "https", 443, "/metrics")
Expect(err).NotTo(HaveOccurred(), "Failed to get Metrics Server /metrics endpoint")
metrics, err := parseMetricNames(resp)
Expect(err).NotTo(HaveOccurred(), "Failed to parse Metrics Server metrics")
sort.Strings(metrics)

Expect(metrics).To(Equal([]string{
"apiserver_audit_event_total",
"apiserver_audit_requests_rejected_total",
"apiserver_client_certificate_expiration_seconds",
"apiserver_current_inflight_requests",
"apiserver_envelope_encryption_dek_cache_fill_percent",
"apiserver_request_duration_seconds",
"apiserver_request_filter_duration_seconds",
"apiserver_request_total",
"apiserver_response_sizes",
"apiserver_storage_data_key_generation_duration_seconds",
"apiserver_storage_data_key_generation_failures_total",
"apiserver_storage_envelope_transformation_cache_misses_total",
"apiserver_tls_handshake_errors_total",
"authenticated_user_requests",
"authentication_attempts",
"authentication_duration_seconds",
"go_gc_duration_seconds",
"go_goroutines",
"go_info",
"go_memstats_alloc_bytes",
"go_memstats_alloc_bytes_total",
"go_memstats_buck_hash_sys_bytes",
"go_memstats_frees_total",
"go_memstats_gc_cpu_fraction",
"go_memstats_gc_sys_bytes",
"go_memstats_heap_alloc_bytes",
"go_memstats_heap_idle_bytes",
"go_memstats_heap_inuse_bytes",
"go_memstats_heap_objects",
"go_memstats_heap_released_bytes",
"go_memstats_heap_sys_bytes",
"go_memstats_last_gc_time_seconds",
"go_memstats_lookups_total",
"go_memstats_mallocs_total",
"go_memstats_mcache_inuse_bytes",
"go_memstats_mcache_sys_bytes",
"go_memstats_mspan_inuse_bytes",
"go_memstats_mspan_sys_bytes",
"go_memstats_next_gc_bytes",
"go_memstats_other_sys_bytes",
"go_memstats_stack_inuse_bytes",
"go_memstats_stack_sys_bytes",
"go_memstats_sys_bytes",
"go_threads",
"metrics_server_api_metric_freshness_seconds",
"metrics_server_kubelet_last_request_time_seconds",
"metrics_server_kubelet_request_duration_seconds",
"metrics_server_kubelet_request_total",
"metrics_server_manager_tick_duration_seconds",
"metrics_server_storage_points",
"process_cpu_seconds_total",
"process_max_fds",
"process_open_fds",
"process_resident_memory_bytes",
"process_start_time_seconds",
"process_virtual_memory_bytes",
"process_virtual_memory_max_bytes",
"rest_client_exec_plugin_certificate_rotation_age",
"rest_client_exec_plugin_ttl_seconds",
"rest_client_request_duration_seconds",
"rest_client_requests_total",
"workqueue_adds_total",
"workqueue_depth",
"workqueue_longest_running_processor_seconds",
"workqueue_queue_duration_seconds",
"workqueue_retries_total",
"workqueue_unfinished_work_seconds",
"workqueue_work_duration_seconds",
}), "Unexpected metrics")
msPods := mustGetMetricsServerPods(client)
for _, pod := range msPods {
resp, err := proxyRequestToPod(restConfig, pod.Namespace, pod.Name, "https", 443, "/metrics")
Expect(err).NotTo(HaveOccurred(), "Failed to get Metrics Server /metrics endpoint")
metrics, err := parseMetricNames(resp)
Expect(err).NotTo(HaveOccurred(), "Failed to parse Metrics Server metrics")
sort.Strings(metrics)

Expect(metrics).To(Equal([]string{
"apiserver_audit_event_total",
"apiserver_audit_requests_rejected_total",
"apiserver_client_certificate_expiration_seconds",
"apiserver_current_inflight_requests",
"apiserver_envelope_encryption_dek_cache_fill_percent",
"apiserver_request_duration_seconds",
"apiserver_request_filter_duration_seconds",
"apiserver_request_total",
"apiserver_response_sizes",
"apiserver_storage_data_key_generation_duration_seconds",
"apiserver_storage_data_key_generation_failures_total",
"apiserver_storage_envelope_transformation_cache_misses_total",
"apiserver_tls_handshake_errors_total",
"authenticated_user_requests",
"authentication_attempts",
"authentication_duration_seconds",
"go_gc_duration_seconds",
"go_goroutines",
"go_info",
"go_memstats_alloc_bytes",
"go_memstats_alloc_bytes_total",
"go_memstats_buck_hash_sys_bytes",
"go_memstats_frees_total",
"go_memstats_gc_cpu_fraction",
"go_memstats_gc_sys_bytes",
"go_memstats_heap_alloc_bytes",
"go_memstats_heap_idle_bytes",
"go_memstats_heap_inuse_bytes",
"go_memstats_heap_objects",
"go_memstats_heap_released_bytes",
"go_memstats_heap_sys_bytes",
"go_memstats_last_gc_time_seconds",
"go_memstats_lookups_total",
"go_memstats_mallocs_total",
"go_memstats_mcache_inuse_bytes",
"go_memstats_mcache_sys_bytes",
"go_memstats_mspan_inuse_bytes",
"go_memstats_mspan_sys_bytes",
"go_memstats_next_gc_bytes",
"go_memstats_other_sys_bytes",
"go_memstats_stack_inuse_bytes",
"go_memstats_stack_sys_bytes",
"go_memstats_sys_bytes",
"go_threads",
"metrics_server_api_metric_freshness_seconds",
"metrics_server_kubelet_last_request_time_seconds",
"metrics_server_kubelet_request_duration_seconds",
"metrics_server_kubelet_request_total",
"metrics_server_manager_tick_duration_seconds",
"metrics_server_storage_points",
"process_cpu_seconds_total",
"process_max_fds",
"process_open_fds",
"process_resident_memory_bytes",
"process_start_time_seconds",
"process_virtual_memory_bytes",
"process_virtual_memory_max_bytes",
"rest_client_exec_plugin_certificate_rotation_age",
"rest_client_exec_plugin_ttl_seconds",
"rest_client_request_duration_seconds",
"rest_client_requests_total",
"workqueue_adds_total",
"workqueue_depth",
"workqueue_longest_running_processor_seconds",
"workqueue_queue_duration_seconds",
"workqueue_retries_total",
"workqueue_unfinished_work_seconds",
"workqueue_work_duration_seconds",
}), "Unexpected metrics")
}
})
})

Expand All @@ -307,12 +313,11 @@ func getRestConfig() (*rest.Config, error) {
return clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
}

func mustGetMetricsServerPod(client clientset.Interface) corev1.Pod {
func mustGetMetricsServerPods(client clientset.Interface) []corev1.Pod {
podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{LabelSelector: "k8s-app=metrics-server"})
Expect(err).NotTo(HaveOccurred(), "Failed to find Metrics Server pod")
Expect(podList.Items).NotTo(BeEmpty(), "Metrics Server pod was not found")
Expect(podList.Items).To(HaveLen(1), "Expect to only have one Metrics Server pod")
return podList.Items[0]
return podList.Items
}

func parseMetricNames(data []byte) ([]string, error) {
Expand Down
12 changes: 12 additions & 0 deletions test/kind-ha-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# kind cluster configuration for the high-availability e2e tests:
# one control-plane node plus two workers, so metrics-server replicas
# can be spread across distinct nodes.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: ClusterConfiguration
    apiServer:
      extraArgs:
        # NOTE(review): routes aggregated-API traffic through service
        # endpoints rather than the ClusterIP — presumably required for
        # the apiserver to reach metrics-server pods on worker nodes in
        # this multi-node setup; confirm against kind/apiserver docs.
        "enable-aggregator-routing": "true"
- role: worker
- role: worker
13 changes: 11 additions & 2 deletions test/test-e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
set -e

# Require NODE_IMAGE to be set; abort with a message otherwise.
: ${NODE_IMAGE:?Need to set NODE_IMAGE to test}
# Default HIGH_AVAILABILITY to false when unset or empty. ':=' assigns
# the default; the previous ':-' form only expanded it and never set the
# variable, leaving the line a no-op.
: ${HIGH_AVAILABILITY:=false}

KIND_VERSION=0.11.0
SKAFFOLD_VERSION=1.24.1
Expand Down Expand Up @@ -40,14 +41,22 @@ setup_skaffold() {
}

create_cluster() {
if ! (${KIND} create cluster --name=e2e --image=${NODE_IMAGE}) ; then
KIND_CONFIG=""
if [ "${HIGH_AVAILABILITY}" = true ] ; then
KIND_CONFIG="$PWD/test/kind-ha-config.yaml"
fi
if ! (${KIND} create cluster --name=e2e --image=${NODE_IMAGE} --config=${KIND_CONFIG}) ; then
echo "Could not create KinD cluster"
exit 1
fi
}

deploy_metrics_server(){
PATH="$PWD/_output:${PATH}" ${SKAFFOLD} run
SKAFFOLD_PROFILE=""
if [ "${HIGH_AVAILABILITY}" = true ] ; then
SKAFFOLD_PROFILE="test-ha"
fi
PATH="$PWD/_output:${PATH}" ${SKAFFOLD} run -p "${SKAFFOLD_PROFILE}"
sleep 5
}

Expand Down

0 comments on commit 2586939

Please sign in to comment.