diff --git a/Makefile b/Makefile index cb9cdb7c..7ff3a3a0 100644 --- a/Makefile +++ b/Makefile @@ -80,7 +80,7 @@ setup: ## Basic project setup, e.g. installing GitHook for checking license head cd .git/hooks && ln -fs ../../.githooks/* . manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=instana-agent-clusterrole webhook paths="./..." output:crd:artifacts:config=config/crd/bases generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." diff --git a/bundle/manifests/manager-role_rbac.authorization.k8s.io_v1_role.yaml b/bundle/manifests/manager-role_rbac.authorization.k8s.io_v1_role.yaml index f6ff0b5f..fdebf459 100644 --- a/bundle/manifests/manager-role_rbac.authorization.k8s.io_v1_role.yaml +++ b/bundle/manifests/manager-role_rbac.authorization.k8s.io_v1_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null - name: manager-role + name: instana-agent-clusterrole rules: - apiGroups: - agents.instana.io diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 7a7fa930..94ffaec2 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -8,7 +8,7 @@ metadata: apiVersion: apps/v1 kind: Deployment metadata: - name: controller-manager + name: instana-agent-controller-manager namespace: system labels: app.kubernetes.io/name: instana-agent-operator diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index fcf15c49..6f32eed8 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: manager-role + name: 
instana-agent-clusterrole rules: - nonResourceURLs: - /healthz diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index c329870d..55539472 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,11 +1,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: manager-rolebinding + name: instana-agent-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: manager-role + name: instana-agent-clusterrole subjects: - kind: ServiceAccount name: instana-agent-operator diff --git a/e2e/agent_test_api.go b/e2e/agent_test_api.go index 5e51b1d9..81ec92b7 100644 --- a/e2e/agent_test_api.go +++ b/e2e/agent_test_api.go @@ -21,6 +21,8 @@ import ( v1 "github.com/instana/instana-agent-operator/api/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -75,7 +77,7 @@ func EnsureAgentNamespaceDeletion() env.Func { } // full purge of resources if anything would be left in the cluster - p = utils.RunCommand("kubectl delete crd/agents.instana.io clusterrole/instana-agent-k8sensor clusterrole/manager-role clusterrole/leader-election-role clusterrolebinding/leader-election-rolebinding clusterrolebinding/manager-rolebinding") + p = utils.RunCommand("kubectl delete crd/agents.instana.io clusterrole/instana-agent-k8sensor clusterrole/instana-agent-clusterrole clusterrole/leader-election-role clusterrolebinding/leader-election-rolebinding clusterrolebinding/instana-agent-clusterrolebinding") if p.Err() != nil { log.Warningf("Could not remove some artifacts, ignoring as they might not be present %s - %s - %s - %d", p.Command(), p.Err(), p.Out(), p.ExitCode()) } @@ -317,7 +319,7 @@ func SetupOperatorDevBuild() e2etypes.StepFunc { func DeployAgentCr(agent *v1.InstanaAgent) e2etypes.StepFunc { return func(ctx context.Context, t 
*testing.T, cfg *envconf.Config) context.Context { - // Wait for controller-manager deployment to ensure that CRD is installed correctly before proceeding. + // Wait for instana-agent-controller-manager deployment to ensure that CRD is installed correctly before proceeding. // Technically, it could be categorized as "Assess" method, but the setup process requires to wait in between. // Therefore, keeping the wait logic in this section. client, err := cfg.NewClient() @@ -389,6 +391,63 @@ func WaitForAgentDaemonSetToBecomeReady(args ...string) e2etypes.StepFunc { } } +func EnsureOldControllerManagerDeploymentIsNotRunning() e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + t.Logf("Ensuring the old deployment %s is not running", InstanaOperatorOldDeploymentName) + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + dep := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: InstanaOperatorOldDeploymentName, Namespace: cfg.Namespace()}, + } + err = wait.For(conditions.New(client.Resources()).ResourceDeleted(&dep), wait.WithTimeout(time.Minute*2)) + if err != nil { + t.Fatal(err) + } + t.Logf("Deployment %s is deleted", InstanaOperatorOldDeploymentName) + return ctx + } +} + +func EnsureOldClusterRoleIsGone() e2etypes.StepFunc { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + t.Logf("Ensuring the old clusterrole %s is not present", InstanaOperatorOldClusterRoleName) + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + clusterrole := rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: InstanaOperatorOldClusterRoleName}, + } + err = wait.For(conditions.New(client.Resources()).ResourceDeleted(&clusterrole), wait.WithTimeout(time.Minute*2)) + if err != nil { + t.Fatal(err) + } + t.Logf("ClusterRole %s is deleted", InstanaOperatorOldClusterRoleName) + return ctx + } +} + +func EnsureOldClusterRoleBindingIsGone() e2etypes.StepFunc 
{ + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + t.Logf("Ensuring the old clusterrolebinding %s is not present", InstanaOperatorOldClusterRoleBindingName) + client, err := cfg.NewClient() + if err != nil { + t.Fatal(err) + } + clusterrolebinding := rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: InstanaOperatorOldClusterRoleBindingName}, + } + err = wait.For(conditions.New(client.Resources()).ResourceDeleted(&clusterrolebinding), wait.WithTimeout(time.Minute*2)) + if err != nil { + t.Fatal(err) + } + t.Logf("ClusterRoleBinding %s is deleted", InstanaOperatorOldClusterRoleBindingName) + return ctx + } +} + func WaitForAgentSuccessfulBackendConnection() e2etypes.StepFunc { return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { t.Log("Searching for successful backend connection in agent logs") @@ -407,8 +466,8 @@ func WaitForAgentSuccessfulBackendConnection() e2etypes.StepFunc { connectionSuccessful := false var buf *bytes.Buffer for i := 0; i < 9; i++ { - t.Log("Sleeping 10 seconds") - time.Sleep(10 * time.Second) + t.Log("Sleeping 20 seconds") + time.Sleep(20 * time.Second) t.Log("Fetching logs") logReq := clientSet.CoreV1().Pods(cfg.Namespace()).GetLogs(podList.Items[0].Name, &corev1.PodLogOptions{}) podLogs, err := logReq.Stream(ctx) diff --git a/e2e/config.go b/e2e/config.go index 82c9c555..fd1f57f9 100644 --- a/e2e/config.go +++ b/e2e/config.go @@ -39,7 +39,10 @@ type OperatorImage struct { var InstanaTestCfg InstanaTestConfig const InstanaNamespace string = "instana-agent" -const InstanaOperatorDeploymentName string = "controller-manager" +const InstanaOperatorOldDeploymentName string = "controller-manager" +const InstanaOperatorOldClusterRoleName string = "manager-role" +const InstanaOperatorOldClusterRoleBindingName string = "manager-rolebinding" +const InstanaOperatorDeploymentName string = "instana-agent-controller-manager" const AgentDaemonSetName string = 
"instana-agent" const AgentCustomResourceName string = "instana-agent" const K8sensorDeploymentName string = "instana-agent-k8sensor" diff --git a/e2e/install_test.go b/e2e/install_test.go index 6c038720..55c3894a 100644 --- a/e2e/install_test.go +++ b/e2e/install_test.go @@ -21,7 +21,7 @@ func TestInitialInstall(t *testing.T) { initialInstallFeature := features.New("initial install dev-operator-build"). Setup(SetupOperatorDevBuild()). Setup(DeployAgentCr(&agent)). - Assess("wait for controller-manager deployment to become ready", WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). + Assess("wait for instana-agent-controller-manager deployment to become ready", WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). @@ -44,7 +44,7 @@ func TestUpdateInstall(t *testing.T) { } return ctx }). - Setup(WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). + Setup(WaitForDeploymentToBecomeReady(InstanaOperatorOldDeploymentName)). //TODO: revert after the 2.1.15 release Setup(DeployAgentCr(&agent)). Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). @@ -53,7 +53,7 @@ func TestUpdateInstall(t *testing.T) { updateInstallDevBuildFeature := features.New("upgrade install from latest released to dev-operator-build"). Setup(SetupOperatorDevBuild()). - Assess("wait for controller-manager deployment to become ready", WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). 
+ Assess("wait for instana-agent-controller-manager deployment to become ready", WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). diff --git a/e2e/upgrade_test.go b/e2e/upgrade_test.go new file mode 100644 index 00000000..52cf43be --- /dev/null +++ b/e2e/upgrade_test.go @@ -0,0 +1,84 @@ +/* + * (c) Copyright IBM Corp. 2025 + * (c) Copyright Instana Inc. 2025 + */ + +package e2e + +import ( + "context" + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" + "sigs.k8s.io/e2e-framework/support/utils" +) + +func TestUpdateInstallFromOldGenericResourceNames(t *testing.T) { + agent := NewAgentCr(t) + installLatestFeature := features.New("deploy instana-agent-operator with the generic resource names (controller-manager, manager-role and manager-rolebinding)"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + const oldResourceNamesOperatorYamlUrl string = "https://github.com/instana/instana-agent-operator/releases/download/v2.1.14/instana-agent-operator.yaml" + t.Logf("Installing latest operator with the old, generic resource names from %s", oldResourceNamesOperatorYamlUrl) + p := utils.RunCommand( + fmt.Sprintf("kubectl apply -f %s", oldResourceNamesOperatorYamlUrl), + ) + if p.Err() != nil { + t.Fatal("Error while applying the old operator yaml", p.Command(), p.Err(), p.Out(), p.ExitCode()) + } + return ctx + }). + Setup(WaitForDeploymentToBecomeReady(InstanaOperatorOldDeploymentName)). + Setup(DeployAgentCr(&agent)). + Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). 
+ Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). + Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). + Feature() + + updateInstallDevBuildFeature := features.New("upgrade install from latest released to dev-operator-build"). + Setup(SetupOperatorDevBuild()). + Assess("wait for instana-agent-controller-manager deployment to become ready", WaitForDeploymentToBecomeReady(InstanaOperatorDeploymentName)). + Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady(K8sensorDeploymentName)). + Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). + Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). + Feature() + + checkReconciliationFeature := features.New("check reconcile works with new operator deployment"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + // delete agent daemonset + t.Log("Delete agent DaemonSet") + var ds appsv1.DaemonSet + if err := cfg.Client().Resources().Get(ctx, AgentDaemonSetName, cfg.Namespace(), &ds); err != nil { + t.Fatal(err) + } + if err := cfg.Client().Resources().Delete(ctx, &ds); err != nil { + t.Fatal(err) + } + t.Log("Agent DaemonSet deleted") + + t.Log("Delete k8sensor Deployment") + var dep appsv1.Deployment + if err := cfg.Client().Resources().Get(ctx, K8sensorDeploymentName, cfg.Namespace(), &dep); err != nil { + t.Fatal(err) + } + + if err := cfg.Client().Resources().Delete(ctx, &dep); err != nil { + t.Fatal(err) + } + t.Log("K8sensor Deployment deleted") + t.Log("Assessing reconciliation now") + return ctx + }). + Assess("confirm the old deployment is gone", EnsureOldControllerManagerDeploymentIsNotRunning()). + Assess("confirm the old clusterrole is gone", EnsureOldClusterRoleIsGone()). + Assess("confirm the old clusterrolebinding is gone", EnsureOldClusterRoleBindingIsGone()). 
+ Assess("wait for k8sensor deployment to become ready", WaitForDeploymentToBecomeReady("instana-agent-k8sensor")). + Assess("wait for agent daemonset to become ready", WaitForAgentDaemonSetToBecomeReady()). + Assess("check agent log for successful connection", WaitForAgentSuccessfulBackendConnection()). + Feature() + + // test feature + testEnv.Test(t, installLatestFeature, updateInstallDevBuildFeature, checkReconciliationFeature) +} diff --git a/main.go b/main.go index 1389601c..bdbef54b 100644 --- a/main.go +++ b/main.go @@ -6,17 +6,26 @@ package main import ( + "context" "flag" "fmt" "os" "runtime" "strconv" + appsv1 "k8s.io/api/apps/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -24,6 +33,7 @@ import ( agentoperatorv1 "github.com/instana/instana-agent-operator/api/v1" "github.com/instana/instana-agent-operator/controllers" + instanaclient "github.com/instana/instana-agent-operator/pkg/k8s/client" "github.com/instana/instana-agent-operator/version" // +kubebuilder:scaffold:imports ) @@ -37,6 +47,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(agentoperatorv1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -67,9 +78,10 @@ func main() { logf.SetLogger(zap.New(zap.UseFlagOptions(&opts)).WithName("instana")) printVersion() + cfg := ctrl.GetConfigOrDie() mgr, err := 
ctrl.NewManager( - ctrl.GetConfigOrDie(), ctrl.Options{ + cfg, ctrl.Options{ Metrics: metricsserver.Options{ BindAddress: metricsAddr, }, @@ -92,19 +104,132 @@ func main() { log.Error(err, "Unable to set up ready check") os.Exit(1) } - // Add our own Agent Controller to the manager if err := controllers.Add(mgr); err != nil { log.Error(err, "Failure setting up Instana Agent Controller") os.Exit(1) } + // controller-manager only runs controllers/runnables after getting the lock + // we do the cleanup beforehand so our new deployment gets the lock + log.Info("Deleting the controller-manager deployment and RBAC if it's present") + //we need a new client because we have to delete old resources before starting the new manager + if client, err := k8sClient.New(cfg, k8sClient.Options{ + Scheme: scheme, + }); err != nil { + log.Error(err, "Failed to create a new k8s client") + } else { + cleanupOldOperator(client) + } + log.Info("Starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { log.Error(err, "Problem running manager") os.Exit(1) } +} +func cleanupOldOperator(k8sClient k8sClient.Client) { + const labelKey string = "app.kubernetes.io/name" + const InstanaOperatorOldDeploymentName string = "controller-manager" + const InstanaOperatorOldClusterRoleName string = "manager-role" + const InstanaOperatorOldClusterRoleBindingName string = "manager-rolebinding" + + log.Info("Delete the old deployment if present") + deploymentsList := &appsv1.DeploymentList{} + // check that resources match with the old name + fieldSelector := fields.OneTermEqualSelector("metadata.name", InstanaOperatorOldDeploymentName) + // check that resources have label "app.kubernetes.io/name: instana-agent-operator" + labelSelector := labels.SelectorFromSet(labels.Set{labelKey: instanaclient.FieldOwnerName}) + + deploymentOptions := &client.ListOptions{ + FieldSelector: fieldSelector, + LabelSelector: labelSelector, + // checking all namespaces + } + + if err := 
k8sClient.List(context.Background(), deploymentsList, deploymentOptions); err != nil { + log.Info(fmt.Sprintf("Failed to list deployments with the label %s:%s and name %s", labelKey, instanaclient.FieldOwnerName, InstanaOperatorOldDeploymentName)) + } else { + // there should be only one deployment but we iterate just in case + log.Info(fmt.Sprintf("Found %v deployments that match the criteria", len(deploymentsList.Items))) + for _, deployment := range deploymentsList.Items { + ns := deployment.GetNamespace() + log.Info(fmt.Sprintf("Deleting the old operator deployment %s in namespace %s", InstanaOperatorOldDeploymentName, ns)) + if err := k8sClient.Delete(context.Background(), &deployment); err != nil { + log.Info(fmt.Sprintf("Failed to delete the old operator deployment %s", InstanaOperatorOldDeploymentName)) + } else { + log.Info(fmt.Sprintf("Successfully deleted the deployment %s", InstanaOperatorOldDeploymentName)) + } + } + } + + log.Info("Delete old RBAC resources if present") + oldRole := &rbacv1.ClusterRole{} + roleKey := types.NamespacedName{ + Name: InstanaOperatorOldClusterRoleName, + } + if err := k8sClient.Get(context.Background(), roleKey, oldRole); err != nil { + if errors.IsNotFound(err) { + log.Info("Old operator clusterrole is not present in the cluster") + } else { + log.Error(err, "Failed to get the old operator clusterrole "+InstanaOperatorOldClusterRoleName) + } + } else { + // check if it grants the operator's CRD API group "agents.instana.io" so we don't delete an unrelated role + hasInstanaApiGroup := false + for _, rule := range oldRole.Rules { + for _, apiGroup := range rule.APIGroups { + if apiGroup == "agents.instana.io" { + hasInstanaApiGroup = true + break + } + } + if hasInstanaApiGroup { + break + } + } + if !hasInstanaApiGroup { + log.Info(fmt.Sprintf("ClusterRole with name %s found, but it's not coming from instana; skipping the deletion", InstanaOperatorOldClusterRoleName)) + } else { + log.Info(fmt.Sprintf("Deleting the old operator clusterrole %s", InstanaOperatorOldClusterRoleName)) + if 
err := k8sClient.Delete(context.Background(), oldRole); err != nil { + log.Info(fmt.Sprintf("Failed to delete the old operator clusterrole %s", InstanaOperatorOldClusterRoleName)) + } else { + log.Info(fmt.Sprintf("Successfully deleted the clusterrole %s", InstanaOperatorOldClusterRoleName)) + } + } + } + + oldRoleBinding := &rbacv1.ClusterRoleBinding{} + bindingKey := types.NamespacedName{ + Name: InstanaOperatorOldClusterRoleBindingName, + } + if err := k8sClient.Get(context.Background(), bindingKey, oldRoleBinding); err != nil { + if errors.IsNotFound(err) { + log.Info("Old operator clusterrolebinding is not present in the cluster") + } else { + log.Error(err, "Failed to get the old operator clusterrolebinding "+InstanaOperatorOldClusterRoleBindingName) + } + } else { + hasInstanaAgentOperatorSA := false + for _, subject := range oldRoleBinding.Subjects { + if subject.Kind == "ServiceAccount" && subject.Name == instanaclient.FieldOwnerName { + hasInstanaAgentOperatorSA = true + break + } + } + if !hasInstanaAgentOperatorSA { + log.Info(fmt.Sprintf("ClusterRoleBinding with name %s found, but the SA doesn't match; skipping the deletion", InstanaOperatorOldClusterRoleBindingName)) + } else { + log.Info(fmt.Sprintf("Deleting the old operator clusterrolebinding %s", InstanaOperatorOldClusterRoleBindingName)) + if err := k8sClient.Delete(context.Background(), oldRoleBinding); err != nil { + log.Info("Failed to delete the old operator clusterrolebinding " + InstanaOperatorOldClusterRoleBindingName) + } else { + log.Info(fmt.Sprintf("Successfully deleted the clusterrolebinding %s", InstanaOperatorOldClusterRoleBindingName)) + } + } + } } func printVersion() { diff --git a/pkg/k8s/client/client.go b/pkg/k8s/client/client.go index 25bd4f6c..a1464381 100644 --- a/pkg/k8s/client/client.go +++ b/pkg/k8s/client/client.go @@ -52,6 +52,7 @@ type InstanaAgentClient interface { GetAsResult(ctx context.Context, key k8sClient.ObjectKey, obj k8sClient.Object, opts 
...k8sClient.GetOption) result.Result[k8sClient.Object] Status() k8sClient.SubResourceWriter Patch(ctx context.Context, obj k8sClient.Object, patch k8sClient.Patch, opts ...k8sClient.PatchOption) error + Delete(ctx context.Context, obj k8sClient.Object, opts ...k8sClient.DeleteOption) error } type instanaAgentClient struct { @@ -119,6 +120,14 @@ func (c *instanaAgentClient) Apply( ) } +func (c *instanaAgentClient) Delete( + ctx context.Context, + obj k8sClient.Object, + opts ...k8sClient.DeleteOption, +) error { + return c.k8sClient.Delete(ctx, obj, opts...) +} + func (c *instanaAgentClient) GetAsResult( ctx context.Context, key k8sClient.ObjectKey,