From cf9f0bb8985290a0e4dc7c3b5582a7d640d3972c Mon Sep 17 00:00:00 2001 From: Brett Tofel Date: Thu, 27 Feb 2025 10:32:43 -0500 Subject: [PATCH 1/9] Adds TEST_FILTER env var to e2e run Signed-off-by: Brett Tofel --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index cc716378f..2ef7afa0a 100644 --- a/Makefile +++ b/Makefile @@ -192,7 +192,7 @@ test: manifests generate fmt lint test-unit test-e2e #HELP Run all tests. .PHONY: e2e e2e: #EXHELP Run the e2e tests. - go test -count=1 -v ./test/e2e/... + go test -count=1 -v -run "$(if $(TEST_FILTER),$(TEST_FILTER),.)" ./test/e2e/... E2E_REGISTRY_NAME := docker-registry E2E_REGISTRY_NAMESPACE := operator-controller-e2e From 27c86c2c626b4171d527d5fc1f310857e95e5591 Mon Sep 17 00:00:00 2001 From: Brett Tofel Date: Thu, 27 Feb 2025 14:02:00 -0500 Subject: [PATCH 2/9] Rewrite metrics endpoint test to use client-go test/utils.go deleted because no used funcs Signed-off-by: Brett Tofel --- test/e2e/metrics_test.go | 413 +++++++++++++++++++++++++++------------ test/utils/utils.go | 69 ------- 2 files changed, 292 insertions(+), 190 deletions(-) delete mode 100644 test/utils/utils.go diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go index a1f6c4a2c..a1a2fbd4e 100644 --- a/test/e2e/metrics_test.go +++ b/test/e2e/metrics_test.go @@ -16,23 +16,31 @@ package e2e import ( "bytes" "context" - "fmt" - "io" - "os/exec" + "errors" "strings" "testing" "time" "github.com/stretchr/testify/require" - - "github.com/operator-framework/operator-controller/test/utils" + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + "sigs.k8s.io/controller-runtime/pkg/client/config" ) // TestOperatorControllerMetricsExportedEndpoint verifies that the metrics endpoint for the operator controller func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { - client := utils.FindK8sClient(t) - config := NewMetricsTestConfig( - t, client, + kubeClient, restConfig := findK8sClient(t) + mtc := NewMetricsTestConfig( + t, + kubeClient, + restConfig, "control-plane=operator-controller-controller-manager", "operator-controller-metrics-reader", "operator-controller-metrics-binding", @@ -41,14 +49,16 @@ func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { "https://operator-controller-service.NAMESPACE.svc.cluster.local:8443/metrics", ) - config.run() + mtc.run() } // TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for catalogd func TestCatalogdMetricsExportedEndpoint(t *testing.T) { - client := utils.FindK8sClient(t) - config := NewMetricsTestConfig( - t, client, + kubeClient, restConfig := findK8sClient(t) + mtc := NewMetricsTestConfig( + t, + kubeClient, + restConfig, "control-plane=catalogd-controller-manager", "catalogd-metrics-reader", "catalogd-metrics-binding", @@ -57,13 +67,29 @@ func TestCatalogdMetricsExportedEndpoint(t *testing.T) { "https://catalogd-service.NAMESPACE.svc.cluster.local:7443/metrics", ) - config.run() + mtc.run() +} + +func findK8sClient(t *testing.T) (kubernetes.Interface, *rest.Config) { + cfg, err := config.GetConfig() + if err != nil { + t.Fatalf("Failed to get Kubernetes config: %v", err) + } + + clientset, err := kubernetes.NewForConfig(cfg) + if err != nil { + t.Fatalf("Failed to 
create client from config: %v", err) + } + + t.Log("Successfully created Kubernetes client via controller-runtime config") + return clientset, cfg } // MetricsTestConfig holds the necessary configurations for testing metrics endpoints. type MetricsTestConfig struct { t *testing.T - client string + kubeClient kubernetes.Interface + restConfig *rest.Config namespace string clusterRole string clusterBinding string @@ -73,13 +99,27 @@ type MetricsTestConfig struct { } // NewMetricsTestConfig initializes a new MetricsTestConfig. -func NewMetricsTestConfig(t *testing.T, client, selector, clusterRole, clusterBinding, serviceAccount, curlPodName, metricsURL string) *MetricsTestConfig { - namespace := getComponentNamespace(t, client, selector) +func NewMetricsTestConfig( + t *testing.T, + kubeClient kubernetes.Interface, + restConfig *rest.Config, + selector string, + clusterRole string, + clusterBinding string, + serviceAccount string, + curlPodName string, + metricsURL string, +) *MetricsTestConfig { + // Discover which namespace the relevant Pod is running in + namespace := getComponentNamespace(t, kubeClient, selector) + + // Replace the placeholder in the metrics URL metricsURL = strings.ReplaceAll(metricsURL, "NAMESPACE", namespace) return &MetricsTestConfig{ t: t, - client: client, + kubeClient: kubeClient, + restConfig: restConfig, namespace: namespace, clusterRole: clusterRole, clusterBinding: clusterBinding, @@ -89,134 +129,265 @@ func NewMetricsTestConfig(t *testing.T, client, selector, clusterRole, clusterBi } } -// run will execute all steps of those tests +// run executes the entire test flow func (c *MetricsTestConfig) run() { - c.createMetricsClusterRoleBinding() - token := c.getServiceAccountToken() - c.createCurlMetricsPod() - c.validate(token) - defer c.cleanup() -} - -// createMetricsClusterRoleBinding to binding and expose the metrics -func (c *MetricsTestConfig) createMetricsClusterRoleBinding() { - c.t.Logf("Creating ClusterRoleBinding %s in namespace %s", c.clusterBinding, c.namespace) - cmd := exec.Command(c.client, "create", "clusterrolebinding", c.clusterBinding, - "--clusterrole="+c.clusterRole, - "--serviceaccount="+c.namespace+":"+c.serviceAccount) - output, err := cmd.CombinedOutput() - require.NoError(c.t, err, "Error creating ClusterRoleBinding: %s", string(output)) -} - -// getServiceAccountToken return the token requires to have access to the metrics -func (c *MetricsTestConfig) getServiceAccountToken() string { - c.t.Logf("Generating ServiceAccount token at namespace %s", c.namespace) - cmd := exec.Command(c.client, "create", "token", c.serviceAccount, "-n", c.namespace) - tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(cmd) - require.NoError(c.t, err, "Error creating token: %s", string(tokenCombinedOutput)) - return string(bytes.TrimSpace(tokenOutput)) -} - -// createCurlMetricsPod creates the Pod with curl image to allow check if the metrics are working -func (c *MetricsTestConfig) createCurlMetricsPod() { + ctx := context.Background() + c.createMetricsClusterRoleBinding(ctx) + token := c.getServiceAccountToken(ctx) + c.createCurlMetricsPod(ctx) + c.waitForPodReady(ctx) + // Exec `curl` in the Pod to validate the metrics + c.validateMetricsEndpoint(ctx, token) + defer c.cleanup(ctx) +} + +// createMetricsClusterRoleBinding to bind the cluster role so metrics are accessible +func (c *MetricsTestConfig) createMetricsClusterRoleBinding(ctx context.Context) { + c.t.Logf("Creating ClusterRoleBinding %q in namespace %q", c.clusterBinding, c.namespace) + + 
crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.clusterBinding, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: c.serviceAccount, + Namespace: c.namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: c.clusterRole, + }, + } + + _, err := c.kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}) + require.NoError(c.t, err, "Error creating ClusterRoleBinding") +} + +// getServiceAccountToken creates a TokenRequest for the service account +func (c *MetricsTestConfig) getServiceAccountToken(ctx context.Context) string { + c.t.Logf("Generating ServiceAccount token in namespace %q", c.namespace) + + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: []string{"https://kubernetes.default.svc.cluster.local"}, + ExpirationSeconds: nil, + }, + } + + tr, err := c.kubeClient.CoreV1(). + ServiceAccounts(c.namespace). + CreateToken(ctx, c.serviceAccount, tokenRequest, metav1.CreateOptions{}) + require.NoError(c.t, err, "Error requesting token for SA %q", c.serviceAccount) + + token := tr.Status.Token + require.NotEmpty(c.t, token, "ServiceAccount token was empty") + return token +} + +// createCurlMetricsPod spawns a pod running `curlimages/curl` to check metrics +func (c *MetricsTestConfig) createCurlMetricsPod(ctx context.Context) { c.t.Logf("Creating curl pod (%s/%s) to validate the metrics endpoint", c.namespace, c.curlPodName) - cmd := exec.Command(c.client, "run", c.curlPodName, - "--image=curlimages/curl", "-n", c.namespace, - "--restart=Never", - "--overrides", `{ - "spec": { - "terminationGradePeriodSeconds": 0, - "containers": [{ - "name": "curl", - "image": "curlimages/curl", - "command": ["sh", "-c", "sleep 3600"], - "securityContext": { - "allowPrivilegeEscalation": false, - "capabilities": {"drop": ["ALL"]}, - "runAsNonRoot": true, - "runAsUser": 1000, - "seccompProfile": {"type": "RuntimeDefault"} - } - }], - "serviceAccountName": "`+c.serviceAccount+`" - } - }`) - output, err := cmd.CombinedOutput() - require.NoError(c.t, err, "Error creating curl pod: %s", string(output)) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.curlPodName, + Namespace: c.namespace, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: c.serviceAccount, + TerminationGracePeriodSeconds: int64Ptr(0), + Containers: []corev1.Container{ + { + Name: "curl", + Image: "curlimages/curl", + Command: []string{"sh", "-c", "sleep 3600"}, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: boolPtr(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + RunAsNonRoot: boolPtr(true), + RunAsUser: int64Ptr(1000), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + } + + _, err := c.kubeClient.CoreV1().Pods(c.namespace).Create(ctx, pod, metav1.CreateOptions{}) + require.NoError(c.t, err, "Error creating curl pod") } -// validate verifies if is possible to access the metrics -func (c *MetricsTestConfig) validate(token string) { +// waitForPodReady polls until the Pod is in Ready condition +func (c *MetricsTestConfig) waitForPodReady(ctx context.Context) { c.t.Log("Waiting for the curl pod to be ready") - waitCmd := exec.Command(c.client, "wait", "--for=condition=Ready", "pod", c.curlPodName, "-n", c.namespace, "--timeout=60s") - waitOutput, waitErr := 
waitCmd.CombinedOutput() - require.NoError(c.t, waitErr, "Error waiting for curl pod to be ready: %s", string(waitOutput)) + err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { + pod, err := c.kubeClient.CoreV1().Pods(c.namespace).Get(ctx, c.curlPodName, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { + return true, nil + } + } + return false, nil + }) + if err != nil { + // If the context timed out, the test should fail with a more direct message + if errors.Is(err, context.DeadlineExceeded) { + c.t.Fatal("Timed out waiting for the curl pod to become Ready") + } + require.NoError(c.t, err, "Error waiting for curl pod to become Ready") + } +} + +// validateMetricsEndpoint performs `kubectl exec ... curl ` logic +func (c *MetricsTestConfig) validateMetricsEndpoint(ctx context.Context, token string) { + c.t.Log("Validating the metrics endpoint via pod exec") + + // The command to run inside the container + cmd := []string{ + "curl", "-v", "-k", + "-H", "Authorization: Bearer " + token, + c.metricsURL, + } + + // Construct the request to exec into the pod + req := c.kubeClient.CoreV1().RESTClient(). + Post(). + Resource("pods"). + Namespace(c.namespace). + Name(c.curlPodName). + SubResource("exec"). + VersionedParams(&corev1.PodExecOptions{ + Container: "curl", + Command: cmd, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: false, + }, scheme.ParameterCodec) + + // Create an SPDY executor + executor, err := remotecommand.NewSPDYExecutor(c.restConfig, "POST", req.URL()) + require.NoError(c.t, err, "Error creating SPDY executor to exec in pod") + + var stdout, stderr bytes.Buffer + streamOpts := remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &stdout, + Stderr: &stderr, + Tty: false, + } + + err = executor.StreamWithContext(ctx, streamOpts) + require.NoError(c.t, err, "Error streaming exec request: %v", stderr.String()) - c.t.Log("Validating the metrics endpoint") - curlCmd := exec.Command(c.client, "exec", c.curlPodName, "-n", c.namespace, "--", - "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, c.metricsURL) - output, err := curlCmd.CombinedOutput() - require.NoError(c.t, err, "Error calling metrics endpoint: %s", string(output)) - require.Contains(c.t, string(output), "200 OK", "Metrics endpoint did not return 200 OK") + // Combine stdout + stderr + combined := stdout.String() + stderr.String() + require.Contains(c.t, combined, "200 OK", "Metrics endpoint did not return 200 OK") } -// cleanup removes the created resources. Uses a context with timeout to prevent hangs. -func (c *MetricsTestConfig) cleanup() { +// cleanup deletes the test resources +func (c *MetricsTestConfig) cleanup(ctx context.Context) { c.t.Log("Cleaning up resources") - _ = exec.Command(c.client, "delete", "clusterrolebinding", c.clusterBinding, "--ignore-not-found=true", "--force").Run() - _ = exec.Command(c.client, "delete", "pod", c.curlPodName, "-n", c.namespace, "--ignore-not-found=true", "--force").Run() + policy := metav1.DeletePropagationForeground - // Create a context with a 60-second timeout. 
- ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() + // Delete the ClusterRoleBinding + _ = c.kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, c.clusterBinding, metav1.DeleteOptions{ + PropagationPolicy: &policy, + }) + waitForClusterRoleBindingDeletion(ctx, c.t, c.kubeClient, c.clusterBinding) - // Wait for the ClusterRoleBinding to be deleted. - if err := waitForDeletion(ctx, c.client, "clusterrolebinding", c.clusterBinding); err != nil { - c.t.Logf("Error waiting for clusterrolebinding deletion: %v", err) - } else { - c.t.Log("ClusterRoleBinding deleted") - } + // "Force" delete the Pod by setting grace period to 0 + gracePeriod := int64(0) + _ = c.kubeClient.CoreV1().Pods(c.namespace).Delete(ctx, c.curlPodName, metav1.DeleteOptions{ + GracePeriodSeconds: &gracePeriod, + PropagationPolicy: &policy, + }) + waitForPodDeletion(ctx, c.t, c.kubeClient, c.namespace, c.curlPodName) +} - // Wait for the Pod to be deleted. - if err := waitForDeletion(ctx, c.client, "pod", c.curlPodName, "-n", c.namespace); err != nil { - c.t.Logf("Error waiting for pod deletion: %v", err) +// waitForClusterRoleBindingDeletion polls until the named ClusterRoleBinding no longer exists +func waitForClusterRoleBindingDeletion(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, name string) { + err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { + _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return true, nil + } + return false, err + } + return false, nil + }) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + t.Fatalf("Timed out waiting for ClusterRoleBinding %q to be deleted", name) + } + t.Logf("Error waiting for ClusterRoleBinding %q deletion: %v", name, err) } else { - c.t.Log("Pod deleted") + t.Logf("ClusterRoleBinding %q deleted", name) } } -// waitForDeletion uses "kubectl wait" to block until the specified resource is deleted -// or until the 60-second timeout is reached. -func waitForDeletion(ctx context.Context, client, resourceType, resourceName string, extraArgs ...string) error { - args := []string{"wait", "--for=delete", resourceType, resourceName} - args = append(args, extraArgs...) - args = append(args, "--timeout=60s") - cmd := exec.CommandContext(ctx, client, args...) - output, err := cmd.CombinedOutput() +// waitForPodDeletion polls until the named Pod no longer exists +func waitForPodDeletion(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace, name string) { + err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 90*time.Second, false, func(ctx context.Context) (bool, error) { + pod, getErr := kubeClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if getErr != nil { + // The standard "not found" check + if strings.Contains(getErr.Error(), "not found") { + return true, nil + } + return false, getErr + } + // Some extra log info if the Pod is still around + t.Logf("Pod %q still present, phase=%q, deleting... 
(Timestamp=%v)", + name, pod.Status.Phase, pod.DeletionTimestamp) + return false, nil + }) if err != nil { - return fmt.Errorf("error waiting for deletion of %s %s: %v, output: %s", resourceType, resourceName, err, string(output)) + if errors.Is(err, context.DeadlineExceeded) { + t.Fatalf("Timed out waiting for Pod %q to be deleted", name) + } + t.Logf("Error waiting for Pod %q deletion: %v", name, err) + } else { + t.Logf("Pod %q deleted", name) } - return nil } -// getComponentNamespace returns the namespace where operator-controller or catalogd is running -func getComponentNamespace(t *testing.T, client, selector string) string { - cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}") - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error determining namespace: %s", string(output)) +// getComponentNamespace identifies which Namespace is running a Pod that matches `selector` +func getComponentNamespace(t *testing.T, kubeClient kubernetes.Interface, selector string) string { + t.Logf("Listing pods for selector %q to discover namespace", selector) + ctx := context.Background() - namespace := string(bytes.TrimSpace(output)) + pods, err := kubeClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) + require.NoError(t, err, "Error listing pods for selector %q", selector) + require.NotEmpty(t, pods.Items, "No pods found for selector %q", selector) + + namespace := pods.Items[0].Namespace if namespace == "" { - t.Fatal("No namespace found for selector " + selector) + t.Fatalf("No namespace found for selector %q", selector) } return namespace } -func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) { - var outOnly, outAndErr bytes.Buffer - allWriter := io.MultiWriter(&outOnly, &outAndErr) - cmd.Stdout = allWriter - cmd.Stderr = &outAndErr - err := cmd.Run() - return outOnly.Bytes(), outAndErr.Bytes(), err +// Helpers for pointers +func boolPtr(b bool) *bool { + return &b +} + +func int64Ptr(i int64) *int64 { + return &i } diff --git a/test/utils/utils.go b/test/utils/utils.go deleted file mode 100644 index 1acc55fe6..000000000 --- a/test/utils/utils.go +++ /dev/null @@ -1,69 +0,0 @@ -package utils - -import ( - "context" - "fmt" - "io" - "net/url" - "os/exec" - "strings" - "testing" - - "k8s.io/client-go/kubernetes" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" -) - -// FindK8sClient returns the first available Kubernetes CLI client from the system, -// It checks for the existence of each client by running `version --client`. -// If no suitable client is found, the function terminates the test with a failure. -func FindK8sClient(t *testing.T) string { - t.Logf("Finding kubectl client") - clients := []string{"kubectl", "oc"} - for _, c := range clients { - // Would prefer to use `command -v`, but even that may not be installed! 
- if err := exec.Command(c, "version", "--client").Run(); err == nil { - t.Logf("Using %q as k8s client", c) - return c - } - } - t.Fatal("k8s client not found") - return "" -} - -func ReadTestCatalogServerContents(ctx context.Context, catalog *ocv1.ClusterCatalog, kubeClient kubernetes.Interface) ([]byte, error) { - if catalog == nil { - return nil, fmt.Errorf("cannot read nil catalog") - } - if catalog.Status.URLs == nil { - return nil, fmt.Errorf("catalog %q has no catalog urls", catalog.Name) - } - url, err := url.Parse(catalog.Status.URLs.Base) - if err != nil { - return nil, fmt.Errorf("error parsing clustercatalog url %q: %v", catalog.Status.URLs.Base, err) - } - // url is expected to be in the format of - // http://{service_name}.{namespace}.svc/catalogs/{catalog_name}/ - // so to get the namespace and name of the service we grab only - // the hostname and split it on the '.' character - ns := strings.Split(url.Hostname(), ".")[1] - name := strings.Split(url.Hostname(), ".")[0] - port := url.Port() - // the ProxyGet() call below needs an explicit port value, so if - // value from url.Port() is empty, we assume port 443. - if port == "" { - if url.Scheme == "https" { - port = "443" - } else { - port = "80" - } - } - resp := kubeClient.CoreV1().Services(ns).ProxyGet(url.Scheme, name, port, url.JoinPath("api", "v1", "all").Path, map[string]string{}) - rc, err := resp.Stream(ctx) - if err != nil { - return nil, err - } - defer rc.Close() - - return io.ReadAll(rc) -} From dd0154eb113c50e7ea14013b8b02ddaf57d07534 Mon Sep 17 00:00:00 2001 From: Brett Tofel Date: Mon, 3 Mar 2025 08:16:58 -0500 Subject: [PATCH 3/9] Rm pointer helpers, use k8s ones. Signed-off-by: Brett Tofel --- test/e2e/metrics_test.go | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go index a1a2fbd4e..8308ead94 100644 --- a/test/e2e/metrics_test.go +++ b/test/e2e/metrics_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/config" ) @@ -199,19 +200,19 @@ func (c *MetricsTestConfig) createCurlMetricsPod(ctx context.Context) { }, Spec: corev1.PodSpec{ ServiceAccountName: c.serviceAccount, - TerminationGracePeriodSeconds: int64Ptr(0), + TerminationGracePeriodSeconds: ptr.To(int64(0)), Containers: []corev1.Container{ { Name: "curl", Image: "curlimages/curl", Command: []string{"sh", "-c", "sleep 3600"}, SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: boolPtr(false), + AllowPrivilegeEscalation: ptr.To(false), Capabilities: &corev1.Capabilities{ Drop: []corev1.Capability{"ALL"}, }, - RunAsNonRoot: boolPtr(true), - RunAsUser: int64Ptr(1000), + RunAsNonRoot: ptr.To(true), + RunAsUser: ptr.To(int64(1000)), SeccompProfile: &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, }, @@ -382,12 +383,3 @@ func getComponentNamespace(t *testing.T, kubeClient kubernetes.Interface, select } return namespace } - -// Helpers for pointers -func boolPtr(b bool) *bool { - return &b -} - -func int64Ptr(i int64) *int64 { - return &i -} From 37d7a2a082531425be25f682481379947558d449 Mon Sep 17 00:00:00 2001 From: Brett Tofel Date: Mon, 3 Mar 2025 10:51:39 -0500 Subject: [PATCH 4/9] Rename main test client to globalClient (de-shadow "c") Signed-off-by: Brett Tofel --- test/e2e/cluster_extension_install_test.go | 146 ++++++++++----------- test/e2e/e2e_suite_test.go | 12 +- 2 files 
changed, 79 insertions(+), 79 deletions(-) diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go index a01124bfb..f526fc6c5 100644 --- a/test/e2e/cluster_extension_install_test.go +++ b/test/e2e/cluster_extension_install_test.go @@ -38,7 +38,7 @@ func createNamespace(ctx context.Context, name string) (*corev1.Namespace, error Name: name, }, } - err := c.Create(ctx, ns) + err := globalClient.Create(ctx, ns) if err != nil { return nil, err } @@ -52,7 +52,7 @@ func createServiceAccount(ctx context.Context, name types.NamespacedName, cluste Namespace: name.Namespace, }, } - err := c.Create(ctx, sa) + err := globalClient.Create(ctx, sa) if err != nil { return nil, err } @@ -156,7 +156,7 @@ func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *core }, }, } - err := c.Create(ctx, cr) + err := globalClient.Create(ctx, cr) if err != nil { return err } @@ -177,7 +177,7 @@ func createClusterRoleAndBindingForSA(ctx context.Context, name string, sa *core Name: name, }, } - err = c.Create(ctx, crb) + err = globalClient.Create(ctx, crb) if err != nil { return err } @@ -219,7 +219,7 @@ func validateCatalogUnpack(t *testing.T) { catalog := &ocv1.ClusterCatalog{} t.Log("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) assert.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeProgressing) assert.NotNil(ct, cond) @@ -234,7 +234,7 @@ func validateCatalogUnpack(t *testing.T) { t.Log("Ensuring ClusterCatalog has Status.Condition of Type = Serving with status == True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - err := c.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: testCatalogName}, catalog) assert.NoError(ct, err) cond := apimeta.FindStatusCondition(catalog.Status.Conditions, ocv1.TypeServing) assert.NotNil(ct, cond) @@ -251,7 +251,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for CustomResourceDefinitions of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &apiextensionsv1.CustomResourceDefinitionList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 5*pollDuration, pollInterval) @@ -259,7 +259,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By waiting for ClusterRoleBindings of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &rbacv1.ClusterRoleBindingList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 2*pollDuration, pollInterval) @@ -267,7 +267,7 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { t.Logf("By 
waiting for ClusterRoles of %q to be deleted", clusterExtensionName) require.EventuallyWithT(t, func(ct *assert.CollectT) { list := &rbacv1.ClusterRoleList{} - err := c.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) + err := globalClient.List(context.Background(), list, client.MatchingLabelsSelector{Selector: ls.AsSelector()}) assert.NoError(ct, err) assert.Empty(ct, list.Items) }, 2*pollDuration, pollInterval) @@ -275,32 +275,32 @@ func ensureNoExtensionResources(t *testing.T, clusterExtensionName string) { func testCleanup(t *testing.T, cat *ocv1.ClusterCatalog, clusterExtension *ocv1.ClusterExtension, sa *corev1.ServiceAccount, ns *corev1.Namespace) { t.Logf("By deleting ClusterCatalog %q", cat.Name) - require.NoError(t, c.Delete(context.Background(), cat)) + require.NoError(t, globalClient.Delete(context.Background(), cat)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) t.Logf("By deleting ClusterExtension %q", clusterExtension.Name) - require.NoError(t, c.Delete(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Delete(context.Background(), clusterExtension)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, &ocv1.ClusterExtension{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) t.Logf("By deleting ServiceAccount %q", sa.Name) - require.NoError(t, c.Delete(context.Background(), sa)) + require.NoError(t, globalClient.Delete(context.Background(), sa)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, &corev1.ServiceAccount{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) ensureNoExtensionResources(t, clusterExtension.Name) t.Logf("By deleting Namespace %q", ns.Name) - require.NoError(t, c.Delete(context.Background(), ns)) + require.NoError(t, globalClient.Delete(context.Background(), ns)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: ns.Name}, &corev1.Namespace{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) } @@ -330,7 +330,7 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -349,16 +349,16 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), 
clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting progressing as True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -368,7 +368,7 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -390,7 +390,7 @@ func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -419,15 +419,15 @@ prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, }, } - require.NoError(t, c.Update(context.Background(), &cm)) + require.NoError(t, globalClient.Update(context.Background(), &cm)) t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, 2*time.Minute, pollInterval) // Give the check 2 minutes instead of the typical 1 for the pod's @@ -436,7 +436,7 @@ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, // ConfigMap cache TTL of 1 minute = 2 minutes t.Log("By eventually reporting progressing as True") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := 
apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -446,7 +446,7 @@ location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -465,11 +465,11 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) defer func(cat *ocv1.ClusterCatalog) { - require.NoError(t, c.Delete(context.Background(), cat)) + require.NoError(t, globalClient.Delete(context.Background(), cat)) require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) + err := globalClient.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) return errors.IsNotFound(err) }, pollDuration, pollInterval) }(extraCatalog) @@ -488,16 +488,16 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { } t.Log("It resolves to multiple bundle paths") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a failed resolution with multiple bundles") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True and Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -513,7 +513,7 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -530,10 +530,10 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, c.Create(context.Background(), 
clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful installation") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) assert.Equal(ct, &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{ Name: "test-operator.1.0.0", @@ -553,15 +553,15 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { t.Log("By updating the ClusterExtension resource to a non-successor version") // 1.2.0 does not replace/skip/skipRange 1.0.0. clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, c.Update(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting an unsatisfiable resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True and Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, ocv1.ReasonRetrying, cond.Reason) @@ -576,7 +576,7 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -592,10 +592,10 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -608,10 +608,10 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { // 1.2.0 does not replace/skip/skipRange 1.0.0. 
clusterExtension.Spec.Source.Catalog.Version = "1.2.0" clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting a satisfiable resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -625,7 +625,7 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("When resolving upgrade edges") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -641,10 +641,10 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { Name: sa.Name, }, } - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -656,10 +656,10 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("By updating the ClusterExtension resource by skipping versions") // 1.0.1 replaces 1.0.0 in the test catalog clusterExtension.Spec.Source.Catalog.Version = "1.0.1" - require.NoError(t, c.Update(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Update(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -673,7 +673,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("It resolves again when a catalog is patched with new ImageRef") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) clusterExtension.Spec = 
ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -698,11 +698,11 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -716,7 +716,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { err := patchTestCatalog(context.Background(), testCatalogName, updatedCatalogImage) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -726,7 +726,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -760,7 +760,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { sa, err := createServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -779,11 +779,11 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: 
clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -797,7 +797,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { err = crane.Tag(v2Image, latestImageTag, crane.Insecure) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -807,7 +807,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -821,7 +821,7 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T t.Log("It resolves again when managed content is changed") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -840,11 +840,11 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T } t.Log("It installs the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By reporting a successful installation") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -860,11 +860,11 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T Namespace: clusterExtension.Spec.Namespace, }, } - require.NoError(t, c.Delete(context.Background(), testConfigMap)) + require.NoError(t, globalClient.Delete(context.Background(), testConfigMap)) t.Log("By eventually re-creating the managed resource") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: 
testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) }, pollDuration, pollInterval) } @@ -881,10 +881,10 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes Namespace: ns.Name, }, } - err := c.Create(context.Background(), sa) + err := globalClient.Create(context.Background(), sa) require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -903,16 +903,16 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes } t.Log("It resolves the specified package with correct bundle path") t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) + require.NoError(t, globalClient.Create(context.Background(), clusterExtension)) t.Log("By eventually reporting a successful resolution and bundle path") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) }, pollDuration, pollInterval) t.Log("By eventually reporting Progressing == True with Reason Retrying") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -922,7 +922,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes t.Log("By eventually failing to install the package successfully due to insufficient ServiceAccount permissions") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionFalse, cond.Status) @@ -940,7 +940,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes // after creating and binding the needed permissions to the ServiceAccount. 
t.Log("By eventually installing the package successfully") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) @@ -952,7 +952,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes t.Log("By eventually reporting Progressing == True with Reason Success") require.EventuallyWithT(t, func(ct *assert.CollectT) { - assert.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + assert.NoError(ct, globalClient.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) if assert.NotNil(ct, cond) { assert.Equal(ct, metav1.ConditionTrue, cond.Status) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 354ef75f4..1b52364d3 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -18,8 +18,8 @@ import ( ) var ( - cfg *rest.Config - c client.Client + cfg *rest.Config + globalClient client.Client ) const ( @@ -33,7 +33,7 @@ func TestMain(m *testing.M) { var err error utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) - c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + globalClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) utilruntime.Must(err) os.Exit(m.Run()) @@ -61,7 +61,7 @@ func createTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1 }, } - err := c.Create(ctx, catalog) + err := globalClient.Create(ctx, catalog) return catalog, err } @@ -71,7 +71,7 @@ func createTestCatalog(ctx context.Context, name string, imageRef string) (*ocv1 func patchTestCatalog(ctx context.Context, name string, newImageRef string) error { // Fetch the existing ClusterCatalog catalog := &ocv1.ClusterCatalog{} - err := c.Get(ctx, client.ObjectKey{Name: name}, catalog) + err := globalClient.Get(ctx, client.ObjectKey{Name: name}, catalog) if err != nil { return err } @@ -80,7 +80,7 @@ func patchTestCatalog(ctx context.Context, name string, newImageRef string) erro catalog.Spec.Source.Image.Ref = newImageRef // Patch the ClusterCatalog - err = c.Update(ctx, catalog) + err = globalClient.Update(ctx, catalog) if err != nil { return err } From 562cba657ad66c39154f6eb8379b2734e93eb55b Mon Sep 17 00:00:00 2001 From: Brett Tofel Date: Mon, 3 Mar 2025 11:08:54 -0500 Subject: [PATCH 5/9] Use apierrors, move deferred cleanup, less nesting Signed-off-by: Brett Tofel --- test/e2e/metrics_test.go | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go index 8308ead94..3d15035b8 100644 --- a/test/e2e/metrics_test.go +++ b/test/e2e/metrics_test.go @@ -25,6 +25,7 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -73,14 +74,10 @@ func TestCatalogdMetricsExportedEndpoint(t *testing.T) { func 
findK8sClient(t *testing.T) (kubernetes.Interface, *rest.Config) { cfg, err := config.GetConfig() - if err != nil { - t.Fatalf("Failed to get Kubernetes config: %v", err) - } + require.NoError(t, err, "Failed to get Kubernetes config") clientset, err := kubernetes.NewForConfig(cfg) - if err != nil { - t.Fatalf("Failed to create client from config: %v", err) - } + require.NoError(t, err, "Failed to create client from config") t.Log("Successfully created Kubernetes client via controller-runtime config") return clientset, cfg @@ -133,13 +130,13 @@ func NewMetricsTestConfig( // run executes the entire test flow func (c *MetricsTestConfig) run() { ctx := context.Background() + defer c.cleanup(ctx) c.createMetricsClusterRoleBinding(ctx) token := c.getServiceAccountToken(ctx) c.createCurlMetricsPod(ctx) c.waitForPodReady(ctx) // Exec `curl` in the Pod to validate the metrics c.validateMetricsEndpoint(ctx, token) - defer c.cleanup(ctx) } // createMetricsClusterRoleBinding to bind the cluster role so metrics are accessible @@ -242,13 +239,10 @@ func (c *MetricsTestConfig) waitForPodReady(ctx context.Context) { } return false, nil }) - if err != nil { - // If the context timed out, the test should fail with a more direct message - if errors.Is(err, context.DeadlineExceeded) { - c.t.Fatal("Timed out waiting for the curl pod to become Ready") - } - require.NoError(c.t, err, "Error waiting for curl pod to become Ready") + if errors.Is(err, context.DeadlineExceeded) { + c.t.Fatal("Timed out waiting for the curl pod to become Ready") } + require.NoError(c.t, err, "Error waiting for curl pod to become Ready") } // validateMetricsEndpoint performs `kubectl exec ... curl ` logic @@ -323,7 +317,7 @@ func waitForClusterRoleBindingDeletion(ctx context.Context, t *testing.T, kubeCl err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { _, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) if err != nil { - if strings.Contains(err.Error(), "not found") { + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -345,8 +339,7 @@ func waitForPodDeletion(ctx context.Context, t *testing.T, kubeClient kubernetes err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 90*time.Second, false, func(ctx context.Context) (bool, error) { pod, getErr := kubeClient.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) if getErr != nil { - // The standard "not found" check - if strings.Contains(getErr.Error(), "not found") { + if apierrors.IsNotFound(getErr) { return true, nil } return false, getErr From c426735fc756e0254bc9262d2f0c09fafbf0468f Mon Sep 17 00:00:00 2001 From: Brett Tofel Date: Mon, 3 Mar 2025 13:37:28 -0500 Subject: [PATCH 6/9] Rename main test cfg->globalConfig de-shadow "cfg" Signed-off-by: Brett Tofel --- test/e2e/cluster_extension_install_test.go | 20 ++++++++++---------- test/e2e/e2e_suite_test.go | 6 +++--- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go index f526fc6c5..7c57a078c 100644 --- a/test/e2e/cluster_extension_install_test.go +++ b/test/e2e/cluster_extension_install_test.go @@ -330,7 +330,7 @@ func TestClusterExtensionInstallRegistry(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer 
utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -390,7 +390,7 @@ func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -465,7 +465,7 @@ func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) defer func(cat *ocv1.ClusterCatalog) { require.NoError(t, globalClient.Delete(context.Background(), cat)) require.Eventually(t, func() bool { @@ -513,7 +513,7 @@ func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -576,7 +576,7 @@ func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -625,7 +625,7 @@ func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { t.Log("When resolving upgrade edges") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) t.Log("By creating an ClusterExtension at a specified version") clusterExtension.Spec = ocv1.ClusterExtensionSpec{ @@ -673,7 +673,7 @@ func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { t.Log("It resolves again when a catalog is patched with new ImageRef") clusterExtension, extensionCatalog, sa, ns := testInit(t) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) clusterExtension.Spec = ocv1.ClusterExtensionSpec{ Source: ocv1.SourceConfig{ @@ -760,7 +760,7 @@ func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { sa, err := createServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) require.NoError(t, err) defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg) + defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig) 
 
 	clusterExtension.Spec = ocv1.ClusterExtensionSpec{
 		Source: ocv1.SourceConfig{
@@ -821,7 +821,7 @@ func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T
 	t.Log("It resolves again when managed content is changed")
 	clusterExtension, extensionCatalog, sa, ns := testInit(t)
 	defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
-	defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg)
+	defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig)
 
 	clusterExtension.Spec = ocv1.ClusterExtensionSpec{
 		Source: ocv1.SourceConfig{
@@ -884,7 +884,7 @@ func TestClusterExtensionRecoversFromInitialInstallFailedWhenFailureFixed(t *tes
 	err := globalClient.Create(context.Background(), sa)
 	require.NoError(t, err)
 	defer testCleanup(t, extensionCatalog, clusterExtension, sa, ns)
-	defer utils.CollectTestArtifacts(t, artifactName, globalClient, cfg)
+	defer utils.CollectTestArtifacts(t, artifactName, globalClient, globalConfig)
 
 	clusterExtension.Spec = ocv1.ClusterExtensionSpec{
 		Source: ocv1.SourceConfig{
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index 1b52364d3..7441d1f0b 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -18,7 +18,7 @@ import (
 )
 
 var (
-	cfg          *rest.Config
+	globalConfig *rest.Config
 	globalClient client.Client
 )
 
 const (
@@ -29,11 +29,11 @@
 )
 
 func TestMain(m *testing.M) {
-	cfg = ctrl.GetConfigOrDie()
+	globalConfig = ctrl.GetConfigOrDie()
 
 	var err error
 	utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme))
-	globalClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	globalClient, err = client.New(globalConfig, client.Options{Scheme: scheme.Scheme})
 	utilruntime.Must(err)
 
 	os.Exit(m.Run())

From d09f74d41a6493f2576072f33a6f8307f52a5da3 Mon Sep 17 00:00:00 2001
From: Brett Tofel
Date: Mon, 3 Mar 2025 13:59:41 -0500
Subject: [PATCH 7/9] Allow test-unit filter on package and test

A package filter is needed as well; otherwise UNIT_TEST_DIRS would still
run all the unit tests.

Signed-off-by: Brett Tofel
---
 Makefile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 2ef7afa0a..604b52a50 100644
--- a/Makefile
+++ b/Makefile
@@ -224,7 +224,8 @@ test-unit: $(SETUP_ENVTEST) envtest-k8s-bins #HELP Run the unit tests
 		-tags '$(GO_BUILD_TAGS)' \
 		-cover -coverprofile ${ROOT_DIR}/coverage/unit.out \
 		-count=1 -race -short \
-		$(UNIT_TEST_DIRS) \
+		-run "$(if $(TEST_FILTER),$(TEST_FILTER),.)" \
+		$(if $(TEST_PKG),$(TEST_PKG),$(UNIT_TEST_DIRS)) \
 		-test.gocoverdir=$(COVERAGE_UNIT_DIR)
 
 .PHONY: image-registry

From e82ebca01ea91840f2f386870a8a2d40cf8f1f55 Mon Sep 17 00:00:00 2001
From: Brett Tofel
Date: Mon, 3 Mar 2025 15:18:21 -0500
Subject: [PATCH 8/9] Drop UNIT_TEST_DIRS for TEST_PKGS + TEST_FILTER

Signed-off-by: Brett Tofel
---
 Makefile | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 604b52a50..9bf6c4c32 100644
--- a/Makefile
+++ b/Makefile
@@ -208,7 +208,11 @@ test-ext-dev-e2e: $(OPERATOR_SDK) $(KUSTOMIZE) $(KIND) #HELP Run extension creat
 	test/extension-developer-e2e/setup.sh $(OPERATOR_SDK) $(CONTAINER_RUNTIME) $(KUSTOMIZE) $(KIND) $(KIND_CLUSTER_NAME) $(E2E_REGISTRY_NAMESPACE)
 	go test -count=1 -v ./test/extension-developer-e2e/...
 
-UNIT_TEST_DIRS := $(shell go list ./... | grep -v /test/)
+ENVTEST_VERSION := $(shell go list -m k8s.io/client-go | cut -d" " -f2 | sed 's/^v0\.\([[:digit:]]\{1,\}\)\.[[:digit:]]\{1,\}$$/1.\1.x/')
+# Define TEST_PKGS to be either user-specified or a default set of packages:
+ifeq ($(origin TEST_PKGS), undefined)
+TEST_PKGS := $(shell go list ./... | grep -v /test/)
+endif
 COVERAGE_UNIT_DIR := $(ROOT_DIR)/coverage/unit
 
 .PHONY: envtest-k8s-bins #HELP Uses setup-envtest to download and install the binaries required to run ENVTEST-test based locally at the project/bin directory.
@@ -225,7 +229,7 @@ test-unit: $(SETUP_ENVTEST) envtest-k8s-bins #HELP Run the unit tests
 		-cover -coverprofile ${ROOT_DIR}/coverage/unit.out \
 		-count=1 -race -short \
 		-run "$(if $(TEST_FILTER),$(TEST_FILTER),.)" \
-		$(if $(TEST_PKG),$(TEST_PKG),$(UNIT_TEST_DIRS)) \
+		$(TEST_PKGS) \
 		-test.gocoverdir=$(COVERAGE_UNIT_DIR)
 
 .PHONY: image-registry

From 600fb095481f6f2c30fc26307159fb5b979e7af2 Mon Sep 17 00:00:00 2001
From: Brett Tofel
Date: Wed, 5 Mar 2025 13:38:41 -0500
Subject: [PATCH 9/9] Remove ENVTEST_VERSION dupe in Makefile

Signed-off-by: Brett Tofel
---
 Makefile | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Makefile b/Makefile
index 9bf6c4c32..c407ade2d 100644
--- a/Makefile
+++ b/Makefile
@@ -208,7 +208,6 @@ test-ext-dev-e2e: $(OPERATOR_SDK) $(KUSTOMIZE) $(KIND) #HELP Run extension creat
 	test/extension-developer-e2e/setup.sh $(OPERATOR_SDK) $(CONTAINER_RUNTIME) $(KUSTOMIZE) $(KIND) $(KIND_CLUSTER_NAME) $(E2E_REGISTRY_NAMESPACE)
 	go test -count=1 -v ./test/extension-developer-e2e/...
 
-ENVTEST_VERSION := $(shell go list -m k8s.io/client-go | cut -d" " -f2 | sed 's/^v0\.\([[:digit:]]\{1,\}\)\.[[:digit:]]\{1,\}$$/1.\1.x/')
 # Define TEST_PKGS to be either user-specified or a default set of packages:
 ifeq ($(origin TEST_PKGS), undefined)
 TEST_PKGS := $(shell go list ./... | grep -v /test/)
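
A brief usage sketch of the Makefile knobs this series wires up (`TEST_FILTER` for the `-run` regex, `TEST_PKGS` to override the default package list). The test name `TestMyFeature` and the package path `./internal/...` below are illustrative placeholders, not names defined by these patches:

```sh
# Run only unit tests whose names match TEST_FILTER; the -run argument
# falls back to "." (all tests) when TEST_FILTER is unset.
make test-unit TEST_FILTER='TestMyFeature'

# Narrow the package list as well; TEST_PKGS replaces the default
# `go list ./... | grep -v /test/` expansion. Both values here are
# placeholders for whatever you want to target.
make test-unit TEST_PKGS='./internal/...' TEST_FILTER='TestMyFeature'
```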