Skip to content

Commit

Permalink
Migrate hardened docker test
Browse the repository at this point in the history
Signed-off-by: Derek Nola <derek.nola@suse.com>
  • Loading branch information
dereknola committed Feb 7, 2025
1 parent 677b5de commit 0fa9c7d
Show file tree
Hide file tree
Showing 7 changed files with 400 additions and 9 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/e2e.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ jobs:
strategy:
fail-fast: false
matrix:
dtest: [basics, bootstraptoken, cacerts, etcd, lazypull, skew, snapshotrestore, token, upgrade]
dtest: [basics, bootstraptoken, cacerts, etcd, hardened, lazypull, skew, snapshotrestore, token, upgrade]
arch: [amd64, arm64]
runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }}
env:
Expand Down Expand Up @@ -195,7 +195,7 @@ jobs:
cd ./tests/docker/${{ matrix.dtest }}
if [ ${{ matrix.dtest }} = "upgrade" ] || [ ${{ matrix.dtest }} = "skew" ]; then
./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE -channel=$CHANNEL
elif [ ${{ matrix.dtest }} = "snapshotrestore" ]; then
elif [ ${{ matrix.dtest }} = "snapshotrestore" ] || [ ${{ matrix.dtest }} = "hardened" ]; then
./${{ matrix.dtest }}.test -ci
else
./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE
Expand Down
18 changes: 18 additions & 0 deletions tests/docker/hardened/cluster-level-pss.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# Cluster-level Pod Security Standards (PSS) configuration passed to the
# kube-apiserver via --admission-control-config-file. Defaults enforce the
# "privileged" profile (no blocking) while auditing/warning at "baseline";
# kube-system is exempted entirely so system pods are never flagged.
# NOTE(review): extraction stripped the original indentation; structure
# reconstructed to standard AdmissionConfiguration layout — verify against
# the committed file.
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
  configuration:
    # v1beta1 of the PSS config API; v1 exists on newer Kubernetes — TODO confirm
    # the minimum supported k8s version before bumping.
    apiVersion: pod-security.admission.config.k8s.io/v1beta1
    kind: PodSecurityConfiguration
    defaults:
      enforce: "privileged"
      enforce-version: "latest"
      audit: "baseline"
      audit-version: "latest"
      warn: "baseline"
      warn-version: "latest"
    exemptions:
      usernames: []
      runtimeClasses: []
      namespaces: [kube-system]
126 changes: 126 additions & 0 deletions tests/docker/hardened/hardened_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
package main

import (
"flag"
"fmt"
"strings"
"testing"

"github.com/k3s-io/k3s/tests"
"github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

// config holds the shared Docker-based cluster state for the whole suite.
var config *docker.TestConfig

// ci marks a CI run; AfterSuite uses it to force cluster cleanup.
var ci = flag.Bool("ci", false, "running on CI")

// Test_DockerHardened is the go-test entry point that hands control to the
// ginkgo "Hardened Docker Test Suite". Flags (e.g. -ci) are parsed first so
// the specs can read them.
func Test_DockerHardened(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	RunSpecs(t, "Hardened Docker Test Suite")
}

// Hardened Tests provisions a cluster configured per the CIS hardening guide
// (PSS admission config, audit logging, secrets encryption, restricted kubelet
// flags) and then verifies that workloads, ingress, and NodePort access still
// work once network policies are applied.
var _ = Describe("Hardened Tests", Ordered, func() {

	Context("Setup Cluster", func() {
		It("should provision servers and agents", func() {
			var err error
			config, err = docker.NewTestConfig("rancher/systemd-node")
			Expect(err).NotTo(HaveOccurred())
			// Server hardening: kernel protections, secrets encryption, audit
			// logging, PSS admission config, and restricted TLS/kubelet flags.
			config.ServerYaml = `
protect-kernel-defaults: true
secrets-encryption: true
kube-controller-manager-arg:
- 'terminated-pod-gc-threshold=10'
kubelet-arg:
- 'streaming-connection-idle-timeout=5m'
- 'make-iptables-util-chains=true'
- 'event-qps=0'
- "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
kube-apiserver-arg:
- 'admission-control-config-file=/tmp/cluster-level-pss.yaml'
- 'audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log'
- 'audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml'
- 'audit-log-maxage=30'
- 'audit-log-maxbackup=10'
- 'audit-log-maxsize=100'
`
			config.AgentYaml = `
protect-kernel-defaults: true
kubelet-arg:
- 'streaming-connection-idle-timeout=5m'
- "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
`
			// Delay k3s startup so the admission/audit files referenced by the
			// apiserver flags above can be staged on each node first.
			config.SkipStart = true
			Expect(config.ProvisionServers(1)).To(Succeed())

			for _, server := range config.Servers {
				cmd := "docker cp ./cluster-level-pss.yaml " + server.Name + ":/tmp/cluster-level-pss.yaml"
				Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred())

				cmd = "mkdir -p /var/lib/rancher/k3s/server/logs"
				Expect(server.RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred())
				// Minimal audit policy: record metadata for every request.
				auditYaml := "apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n- level: Metadata"
				cmd = fmt.Sprintf("echo -e '%s' > /var/lib/rancher/k3s/server/audit.yaml", auditYaml)
				Expect(server.RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred())
				Expect(server.RunCmdOnNode("systemctl start k3s")).Error().NotTo(HaveOccurred())
			}
			Expect(config.CopyAndModifyKubeconfig()).To(Succeed())
			config.SkipStart = false
			Expect(config.ProvisionAgents(1)).To(Succeed())
			Eventually(func() error {
				return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
			}, "60s", "5s").Should(Succeed())
			Eventually(func() error {
				return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())
			}, "40s", "5s").Should(Succeed())
		})
	})

	Context("Verify Network Policies", func() {
		It("applies network policies", func() {
			_, err := config.DeployWorkload("hardened-ingress.yaml")
			Expect(err).NotTo(HaveOccurred())
			// One DaemonSet pod per node (1 server + 1 agent) must be ready.
			Eventually(func() (string, error) {
				cmd := "kubectl get daemonset -n default example -o jsonpath='{.status.numberReady}' --kubeconfig=" + config.KubeconfigFile
				return docker.RunCommand(cmd)
			}, "60s", "5s").Should(Equal("2"))
			// FIX: this error was previously assigned and never checked, so a
			// failed network-policy deploy went unnoticed.
			_, err = config.DeployWorkload("hardened-netpool.yaml")
			Expect(err).NotTo(HaveOccurred())
		})
		It("checks ingress connections", func() {
			// The ingress must answer on every node, over both schemes.
			for _, scheme := range []string{"http", "https"} {
				for _, server := range config.Servers {
					cmd := fmt.Sprintf("curl -vksf -H 'Host: example.com' %s://%s/", scheme, server.IP)
					Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred())
				}
				for _, agent := range config.Agents {
					cmd := fmt.Sprintf("curl -vksf -H 'Host: example.com' %s://%s/", scheme, agent.IP)
					Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred())
				}
			}
		})
		It("confirms we can make a request through the nodeport service", func() {
			for _, server := range config.Servers {
				cmd := "kubectl get service/example -o 'jsonpath={.spec.ports[*].nodePort}' --kubeconfig=" + config.KubeconfigFile
				ports, err := docker.RunCommand(cmd)
				Expect(err).NotTo(HaveOccurred())
				for _, port := range strings.Split(ports, " ") {
					cmd := fmt.Sprintf("curl -vksf -H 'Host: example.com' http://%s:%s", server.IP, port)
					Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred())
				}
			}
		})
	})
})

// failed records whether any spec in the suite has failed so far; AfterSuite
// consults it to decide whether to keep the cluster for debugging.
var failed bool

var _ = AfterEach(func() {
	// Latch to true on the first failing spec; never reset.
	if CurrentSpecReport().Failed() {
		failed = true
	}
})

// AfterSuite tears the cluster down: always on CI, and locally only when all
// specs passed (a failed local run keeps the cluster around for inspection).
var _ = AfterSuite(func() {
	// FIX: the original condition `*ci || (config != nil && !failed)` could
	// invoke Cleanup on a nil config when -ci is set but provisioning never
	// produced a config (e.g. NewTestConfig failed). Require config != nil
	// before either branch.
	if config != nil && (*ci || !failed) {
		Expect(config.Cleanup()).To(Succeed())
	}
})
128 changes: 128 additions & 0 deletions tests/docker/resources/hardened-ingress.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
# Test workload for the hardened docker suite: a socat echo DaemonSet exposed
# through a NodePort Service and a traefik Ingress, plus NetworkPolicies that
# restrict which sources may reach the backend pods.
# NOTE(review): extraction stripped the original indentation; structure
# reconstructed from standard Kubernetes manifest layout — verify against the
# committed file (in particular whether `ports` and `from` form one combined
# ingress rule or two separate OR'd rules; the leading dashes in the scrape
# suggest two separate rules, which is what is reproduced here).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example
  namespace: default
  labels:
    app.kubernetes.io: example
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: example
  template:
    metadata:
      labels:
        app.kubernetes.io/name: example
    spec:
      automountServiceAccountToken: false
      # Run as unprivileged uid/gid to satisfy the hardened PSS profile.
      securityContext:
        runAsUser: 405
        runAsGroup: 100
      containers:
      - name: socat
        image: docker.io/alpine/socat:1.7.4.3-r1
        # Minimal HTTP responder: echoes node IP and pod namespace/name so the
        # test can tell which pod answered.
        args:
        - "TCP-LISTEN:8080,reuseaddr,fork"
        - "EXEC:echo -e 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n$(NODE_IP) $(POD_NAMESPACE)/$(POD_NAME)\r\n'"
        ports:
        - containerPort: 8080
          name: http
        env:
        - name: NODE_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        readinessProbe:
          initialDelaySeconds: 2
          periodSeconds: 10
          httpGet:
            path: /
            port: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: example
  namespace: default
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: example
  ports:
  - name: http
    protocol: TCP
    port: 80
    nodePort: 30096
    targetPort: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example
spec:
  rules:
  - host: "example.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: example
            port:
              name: http
---
# Allow access to example backend from traefik ingress
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: ingress-to-backend-example
  namespace: default
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: example
  ingress:
  - ports:
    - port: 8080
      protocol: TCP
  - from:
    - namespaceSelector:
        matchLabels:
          kubernetes.io/metadata.name: kube-system
      podSelector:
        matchLabels:
          app.kubernetes.io/name: traefik
  policyTypes:
  - Ingress
---
# Allow access to example backend from outside the cluster via nodeport service
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: nodeport-to-backend-example
  namespace: default
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: example
  ingress:
  - ports:
    - port: 8080
      protocol: TCP
  - from:
    # Any external source except the cluster pod (10.42/16) and service
    # (10.43/16) CIDRs — k3s defaults.
    - ipBlock:
        cidr: 0.0.0.0/0
        except:
        - 10.42.0.0/16
        - 10.43.0.0/16
  policyTypes:
  - Ingress
Loading

0 comments on commit 0fa9c7d

Please sign in to comment.