Enable logging for e2e tests #430

Open · wants to merge 2 commits into base: main
8 changes: 8 additions & 0 deletions .github/workflows/e2e.yml
@@ -40,6 +40,14 @@ jobs:
       - name: Run e2e tests
         run: "make test-e2e GINKGO_SKIP=${{ env.SKIP_E2E }}"
 
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        if: success() || failure()
+        with:
+          name: logs
+          path: _artifacts
+          retention-days: 7
+
       - name: Cleanup kind clusters
         uses: gacts/run-and-post-run@v1
         with:
2 changes: 1 addition & 1 deletion test/e2e/capmox_test.go
@@ -60,7 +60,7 @@ var _ = Describe("Workload cluster creation", func() {
         result = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 
         // We need to override clusterctl apply log folder to avoid getting our credentials exposed.
-        clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
+        clusterctlLogFolder = filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName())
     })
 
     AfterEach(func() {
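Together with the workflow change above, this override is what makes the clusterctl logs collectable: they move from a throwaway temp directory into the artifact tree that the new upload step ships. A hedged sketch of the resulting layout (the _artifacts value mirrors the workflow's upload path; only the relative paths below come from this diff):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Assumed values: e2e.artifacts-folder pointing at _artifacts (as the
	// workflow's upload path suggests) and the proxy name "bootstrap" used
	// by the suite below.
	artifactFolder := "_artifacts"
	proxyName := "bootstrap"

	// clusterctl apply logs, after this override:
	fmt.Println(filepath.Join(artifactFolder, "clusters", proxyName))
	// kind bootstrap cluster logs (LogFolder in setupBootstrapCluster):
	fmt.Println(filepath.Join(artifactFolder, "kind"))
	// per-node logs written by dumpBootstrapClusterLogs:
	fmt.Println(filepath.Join(artifactFolder, "clusters", proxyName, "machines", "example-node"))
}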
@@ -153,6 +153,7 @@ spec:
           owner: root:root
           permissions: "0700"
       preKubeadmCommands:
+      - echo "127.0.0.1 localhost kubernetes {{ ds.meta_data.hostname }}" >>/etc/hosts
       - /etc/kube-vip-prepare.sh
       initConfiguration:
         nodeRegistration:
68 changes: 56 additions & 12 deletions test/e2e/suite_test.go
@@ -32,8 +32,10 @@ import (
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
 
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/klog/v2"
+    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
     ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
     capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
     "sigs.k8s.io/cluster-api/test/framework"
@@ -76,6 +78,11 @@ var (

 // Test suite global vars.
 var (
+    ctx = ctrl.SetupSignalHandler()
+
+    // watchesCtx is used for log streaming; it gets canceled via cancelWatches once the test suite ends.
+    watchesCtx, cancelWatches = context.WithCancel(ctx)
+
     // e2eConfig to be used for this test, read from configPath.
     e2eConfig *clusterctl.E2EConfig
 
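As an aside, the watchesCtx comment above describes plain Go context cancellation: log streaming runs on a child context that the suite cancels during teardown. A minimal runnable sketch of the mechanism, with stand-in bodies (streamLogs is illustrative, not the framework's actual watcher):

package main

import (
	"context"
	"fmt"
	"time"
)

// streamLogs stands in for the controller-log watchers started by
// InitManagementClusterAndWatchControllerLogs: it runs until its context
// is canceled.
func streamLogs(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("log streaming stopped:", ctx.Err())
			return
		case <-time.After(100 * time.Millisecond):
			fmt.Println("streaming controller logs ...")
		}
	}
}

func main() {
	ctx := context.Background() // the suite uses ctrl.SetupSignalHandler() here
	watchesCtx, cancelWatches := context.WithCancel(ctx)

	go streamLogs(watchesCtx)

	time.Sleep(250 * time.Millisecond) // the specs run here
	cancelWatches()                    // the suite calls this first thing in tearDown()
	time.Sleep(50 * time.Millisecond)  // give the watcher a moment to exit
}

Canceling watchesCtx rather than the root ctx shuts down only the log watchers while signal handling stays intact.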
@@ -112,22 +119,16 @@ func TestE2E(t *testing.T) {

     ctrl.SetLogger(klog.Background())
 
-    // If running in prow, make sure to use the artifacts folder that will be reported in test grid (ignoring the value provided by flag).
-    if prowArtifactFolder, exists := os.LookupEnv("ARTIFACTS"); exists {
-        artifactFolder = prowArtifactFolder
-    }
-
     // ensure the artifacts folder exists
     g.Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid test suite argument. Can't create e2e.artifacts-folder %q", artifactFolder) //nolint:gosec
 
-    RegisterFailHandler(Fail)
-
     if alsoLogToFile {
         w, err := ginkgoextensions.EnableFileLogging(filepath.Join(artifactFolder, "ginkgo-log.txt"))
         g.Expect(err).ToNot(HaveOccurred())
         defer w.Close()
     }
 
+    RegisterFailHandler(Fail)
     RunSpecs(t, "capmox-e2e")
 }

@@ -181,7 +182,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
     kubeconfigPath := parts[3]
 
     e2eConfig = loadE2EConfig(configPath)
-    bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme())
+    bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(), framework.WithMachineLogCollector(framework.DockerLogCollector{}))
 })
 
 // Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads).
@@ -191,6 +192,10 @@ var _ = SynchronizedAfterSuite(func() {
     // After each ParallelNode.
 }, func() {
     // After all ParallelNodes.
+    By("Dumping logs from the bootstrap cluster")
+    if err := dumpBootstrapClusterLogs(); err != nil {
+        GinkgoWriter.Printf("Failed to dump bootstrap cluster logs: %v", err)
+    }
 
     By("Tearing down the management cluster")
     if !skipCleanup {
@@ -208,7 +213,7 @@ func initScheme() *runtime.Scheme {
 }
 
 func loadE2EConfig(configPath string) *clusterctl.E2EConfig {
-    config := clusterctl.LoadE2EConfig(context.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
+    config := clusterctl.LoadE2EConfig(ctx, clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
     Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)
 
     return config
@@ -226,7 +231,7 @@ func createClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFol
     Expect(cniPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", capi_e2e.CNIPath)
     createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(cniPath, capi_e2e.CNIResources)
 
-    clusterctlConfig := clusterctl.CreateRepository(context.TODO(), createRepositoryInput)
+    clusterctlConfig := clusterctl.CreateRepository(ctx, createRepositoryInput)
     Expect(clusterctlConfig).To(BeAnExistingFile(), "The clusterctl config file does not exists in the local repository %s", repositoryFolder)
 
     return clusterctlConfig
@@ -236,10 +241,11 @@ func setupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme,
     var clusterProvider bootstrap.ClusterProvider
     kubeconfigPath := ""
     if !useExistingCluster {
-        clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(context.TODO(), bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
+        clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
             Name:               config.ManagementClusterName,
             RequiresDockerSock: config.HasDockerProvider(),
             Images:             config.Images,
+            LogFolder:          filepath.Join(artifactFolder, "kind"),
         })
         Expect(clusterProvider).ToNot(BeNil(), "Failed to create a bootstrap cluster")
 
@@ -254,7 +260,7 @@ }
 }
 
 func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) {
-    clusterctl.InitManagementClusterAndWatchControllerLogs(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{
+    clusterctl.InitManagementClusterAndWatchControllerLogs(watchesCtx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
         ClusterProxy:            bootstrapClusterProxy,
         ClusterctlConfigPath:    clusterctlConfig,
         InfrastructureProviders: config.InfrastructureProviders(),
@@ -264,10 +270,48 @@ }
 }
 
 func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {
+    cancelWatches()
     if bootstrapClusterProxy != nil {
         bootstrapClusterProxy.Dispose(context.TODO())
     }
     if bootstrapClusterProvider != nil {
         bootstrapClusterProvider.Dispose(context.TODO())
     }
 }
+
+func dumpBootstrapClusterLogs() error {
+    if bootstrapClusterProxy == nil {
+        return nil
+    }
+    clusterLogCollector := bootstrapClusterProxy.GetLogCollector()
+    if clusterLogCollector == nil {
+        return nil
+    }
+
+    nodes, err := bootstrapClusterProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+    if err != nil {
+        return fmt.Errorf("failed to get nodes for the bootstrap cluster: %w", err)
+    }
+
+    for i := range nodes.Items {
+        nodeName := nodes.Items[i].GetName()
+        err := clusterLogCollector.CollectMachineLog(
+            ctx,
+            bootstrapClusterProxy.GetClient(),
+            // The bootstrap cluster is not expected to be a CAPI cluster, so to reuse the log collector
+            // we create a fake machine that wraps the node.
+            // NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to
+            // bootstrap clusters generated with kind. It might not hold for an existing bootstrap cluster
+            // provided by other means.
+            &clusterv1.Machine{
+                Spec:       clusterv1.MachineSpec{ClusterName: nodeName},
+                ObjectMeta: metav1.ObjectMeta{Name: nodeName},
+            },
+            filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "machines", nodeName),
+        )
+        if err != nil {
+            return fmt.Errorf("failed to get logs for the bootstrap cluster node %s: %w", nodeName, err)
+        }
+    }
+    return nil
+}
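To make the NOTE's caveat concrete: with kind, each node is backed by a Docker container that shares the node's name, so a fake Machine named after the node is enough for the Docker log collector to locate it. A small illustrative sketch (node names are kind's usual defaults for a cluster named "bootstrap", not something this code enforces):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// fakeMachineFor mirrors the wrapping done in dumpBootstrapClusterLogs: the
// Machine's name must match the node name for the collector to find the
// right container.
func fakeMachineFor(nodeName string) *clusterv1.Machine {
	return &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{Name: nodeName},
		Spec:       clusterv1.MachineSpec{ClusterName: nodeName},
	}
}

func main() {
	for _, node := range []string{"bootstrap-control-plane", "bootstrap-worker"} {
		fmt.Println("would collect logs for:", fakeMachineFor(node).Name)
	}
}

An externally provided bootstrap cluster may not follow this convention, in which case CollectMachineLog would simply fail to find a matching container.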