
Commit bb3a29f

fix(test): configuration changes and fixes needed to scale-test

Signed-off-by: Alex Castilio dos Santos <[email protected]>
1 parent f574448, commit bb3a29f

14 files changed: +265 -128 lines

.github/workflows/scale-test.yaml (+3 -2)

@@ -96,11 +96,12 @@ jobs:
           NUM_REPLICAS: ${{ inputs.num_replicas }}
           NUM_NETPOLS: ${{ inputs.num_netpol }}
           CLEANUP: ${{ inputs.cleanup }}
-          IMAGE_REGISTRY: ${{ inputs.image_namespace == '' && vars.ACR_NAME || inputs.image_namespace }}
+          IMAGE_REGISTRY: ${{ vars.ACR_NAME }}
           IMAGE_NAMESPACE: ${{ github.repository }}
           TAG: ${{ inputs.image_tag }}
           AZURE_APP_INSIGHTS_KEY: ${{ secrets.AZURE_APP_INSIGHTS_KEY }}
         shell: bash
         run: |
           set -euo pipefail
-          go test -v ./test/e2e/. -timeout 300m -tags=scale -count=1 -args -image-tag=$( [[ $TAG == "" ]] && make version || echo $TAG ) -create-infra=false -delete-infra=false
+          [[ $TAG == "" ]] && TAG=$(make version)
+          go test -v ./test/e2e/. -timeout 300m -tags=scale -count=1 -args -create-infra=false -delete-infra=false
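The tag fallback now happens in the shell before the test run: when the image_tag input is empty, TAG is reassigned from `make version` (reassignment preserves its exported status, so `go test` inherits the resolved value), and the -image-tag flag is gone. A minimal sketch of how the test side could pick this up, assuming a hypothetical helper and an os import rather than the repo's actual code:

    // imageTag resolves the image tag exported by the workflow.
    // Hypothetical helper for illustration only; the real test code may differ.
    func imageTag() string {
        if tag := os.Getenv("TAG"); tag != "" {
            return tag
        }
        return "latest" // assumed fallback; in CI the workflow defaults TAG via `make version`
    }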

test/e2e/common/common.go (+1)

@@ -22,6 +22,7 @@ const (
     KubeSystemNamespace    = "kube-system"
     TestPodNamespace       = "kube-system-test"
     AzureAppInsightsKeyEnv = "AZURE_APP_INSIGHTS_KEY"
+    OutputFilePathEnv      = "OUTPUT_FILEPATH"
 )
 
 var (
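The new constant names the environment variable that points the scale test at an output file. A minimal usage sketch, assuming a caller that imports this common package and a default path of our own choosing (not from the repo):

    // resultsPath returns where scale-test output should be written.
    // Illustrative only; the fallback filename is an assumption.
    func resultsPath() string {
        if p := os.Getenv(common.OutputFilePathEnv); p != "" {
            return p
        }
        return "scale-test-results.json"
    }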

test/e2e/framework/kubernetes/check-pod-status.go (+10 -19)

@@ -14,8 +14,9 @@ import (
 )
 
 const (
-    RetryTimeoutPodsReady  = 5 * time.Minute
-    RetryIntervalPodsReady = 5 * time.Second
+    RetryTimeoutPodsReady     = 5 * time.Minute
+    RetryIntervalPodsReady    = 5 * time.Second
+    timeoutWaitForPodsSeconds = 1200
 
     printInterval = 5 // print to stdout every 5 iterations
 )
@@ -48,7 +49,7 @@ func (w *WaitPodsReady) Run() error {
         return fmt.Errorf("error creating Kubernetes client: %w", err)
     }
 
-    ctx, cancel := context.WithTimeout(context.Background(), defaultTimeoutSeconds*time.Second)
+    ctx, cancel := context.WithTimeout(context.Background(), timeoutWaitForPodsSeconds*time.Second)
     defer cancel()
 
     return WaitForPodReady(ctx, clientset, w.Namespace, w.LabelSelector)
@@ -60,7 +61,6 @@ func (w *WaitPodsReady) Stop() error {
 }
 
 func WaitForPodReady(ctx context.Context, clientset *kubernetes.Clientset, namespace, labelSelector string) error {
-    podReadyMap := make(map[string]bool)
 
     printIterator := 0
     conditionFunc := wait.ConditionWithContextFunc(func(context.Context) (bool, error) {
@@ -78,34 +78,25 @@ func WaitForPodReady(ctx context.Context, clientset *kubernetes.Clientset, names
             return false, nil
         }
 
-        // check each indviidual pod to see if it's in Running state
+        // check each individual pod to see if it's in Running state
         for i := range podList.Items {
-            var pod *corev1.Pod
-            pod, err = clientset.CoreV1().Pods(namespace).Get(ctx, podList.Items[i].Name, metav1.GetOptions{})
-            if err != nil {
-                return false, fmt.Errorf("error getting Pod: %w", err)
-            }
 
             // Check the Pod phase
-            if pod.Status.Phase != corev1.PodRunning {
+            if podList.Items[i].Status.Phase != corev1.PodRunning {
                 if printIterator%printInterval == 0 {
-                    log.Printf("pod \"%s\" is not in Running state yet. Waiting...\n", pod.Name)
+                    log.Printf("pod \"%s\" is not in Running state yet. Waiting...\n", podList.Items[i].Name)
                 }
                 return false, nil
             }
 
             // Check all container status.
-            for _, containerStatus := range pod.Status.ContainerStatuses {
-                if !containerStatus.Ready {
-                    log.Printf("container \"%s\" in pod \"%s\" is not ready yet. Waiting...\n", containerStatus.Name, pod.Name)
+            for i := range podList.Items[i].Status.ContainerStatuses {
+                if !podList.Items[i].Status.ContainerStatuses[i].Ready {
+                    log.Printf("container \"%s\" in pod \"%s\" is not ready yet. Waiting...\n", podList.Items[i].Status.ContainerStatuses[i].Name, podList.Items[i].Name)
                     return false, nil
                 }
             }
 
-            if !podReadyMap[pod.Name] {
-                log.Printf("pod \"%s\" is in Running state\n", pod.Name)
-                podReadyMap[pod.Name] = true
-            }
         }
         log.Printf("all pods in namespace \"%s\" with label \"%s\" are in Running state\n", namespace, labelSelector)
         return true, nil
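The readiness check now trusts the pod objects already returned by the single List call instead of issuing an extra Get per pod, and the per-pod "is in Running state" bookkeeping map is gone. A sketch of how a condition function like this is typically driven, assuming the k8s.io/apimachinery/pkg/util/wait helpers and the constants defined above:

    // Poll the condition every RetryIntervalPodsReady until it returns true,
    // returns an error, or RetryTimeoutPodsReady elapses.
    err := wait.PollUntilContextTimeout(
        context.Background(),
        RetryIntervalPodsReady, // re-check every 5s
        RetryTimeoutPodsReady,  // give up after 5min
        true,                   // run the condition immediately, before the first interval
        conditionFunc,
    )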

test/e2e/framework/kubernetes/create-kapinger-deployment.go (+1 -1)

@@ -138,7 +138,7 @@ func (c *CreateKapingerDeployment) GetKapingerDeployment() *appsv1.Deployment {
                 "memory": resource.MustParse("20Mi"),
             },
             Limits: v1.ResourceList{
-                "memory": resource.MustParse("20Mi"),
+                "memory": resource.MustParse("100Mi"),
             },
         },
         Ports: []v1.ContainerPort{
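The request stays at 20Mi, so scheduling density is unchanged; only the limit rises, presumably so kapinger pods are not OOM-killed under scale-test load. The resulting resources block in isolation, as a sketch using the k8s.io/api/core/v1 types this file already imports:

    Resources: v1.ResourceRequirements{
        Requests: v1.ResourceList{
            "memory": resource.MustParse("20Mi"), // what the scheduler reserves
        },
        Limits: v1.ResourceList{
            "memory": resource.MustParse("100Mi"), // OOM-kill ceiling, now 5x the request
        },
    },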

test/e2e/framework/kubernetes/delete-namespace.go (+8 -2)

@@ -14,6 +14,10 @@ import (
     "k8s.io/client-go/util/retry"
 )
 
+const (
+    deleteNamespaceTimeoutSeconds = 1200
+)
+
 type DeleteNamespace struct {
     Namespace          string
     KubeConfigFilePath string
@@ -30,7 +34,7 @@ func (d *DeleteNamespace) Run() error {
         return fmt.Errorf("error creating Kubernetes client: %w", err)
     }
 
-    ctx, cancel := context.WithTimeout(context.Background(), defaultTimeoutSeconds*time.Second)
+    ctx, cancel := context.WithTimeout(context.Background(), deleteNamespaceTimeoutSeconds*time.Second)
     defer cancel()
 
     err = clientset.CoreV1().Namespaces().Delete(ctx, d.Namespace, metaV1.DeleteOptions{})
@@ -40,8 +44,10 @@ func (d *DeleteNamespace) Run() error {
         }
     }
 
+    numberOfSteps := 9
+
     backoff := wait.Backoff{
-        Steps:    6,
+        Steps:    numberOfSteps,
         Duration: 10 * time.Second,
         Factor:   2.0,
         // Jitter: 0.1,
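With Factor 2.0 the per-attempt delay doubles from the initial 10s, so the worst-case total sleep is Duration * (2^Steps - 1): going from 6 to 9 steps raises it from 630s (~10.5 min) to 5110s (~85 min). A minimal sketch of how such a backoff plays out, assuming the same wait.Backoff semantics used above:

    // Successive Step() calls yield: 10s, 20s, 40s, 80s, 160s, 320s, 640s,
    // 1280s, 2560s — summing to 5110s.
    b := wait.Backoff{Steps: 9, Duration: 10 * time.Second, Factor: 2.0}
    for b.Steps > 0 {
        time.Sleep(b.Step()) // Step mutates b: it decrements Steps and scales Duration
    }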

test/e2e/framework/kubernetes/install-retina-helm.go (+1)

@@ -91,6 +91,7 @@ func (i *InstallHelmChart) Run() error {
     chart.Values["image"].(map[string]interface{})["repository"] = imageRegistry + "/" + imageNamespace + "/retina-agent"
     chart.Values["image"].(map[string]interface{})["initRepository"] = imageRegistry + "/" + imageNamespace + "/retina-init"
     chart.Values["operator"].(map[string]interface{})["repository"] = imageRegistry + "/" + imageNamespace + "/retina-operator"
+    chart.Values["operator"].(map[string]interface{})["enabled"] = true
 
     getclient := action.NewGet(actionConfig)
     release, err := getclient.Run(i.ReleaseName)
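Mutating chart.Values in process is roughly the SDK equivalent of passing --set operator.enabled=true on the helm command line. Note that the chained type assertion panics if the chart ships without an operator values table; a checked variant, as a sketch only (the unchecked form above matches the file's existing style):

    // Defensive version of the same override, as a fragment of Run().
    if op, ok := chart.Values["operator"].(map[string]interface{}); ok {
        op["enabled"] = true
    } else {
        return fmt.Errorf("chart has no operator values table")
    }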

test/e2e/framework/scaletest/add-shared-labels.go (+47 -18)

@@ -4,6 +4,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "log"
     "time"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -12,6 +13,10 @@ import (
     "k8s.io/client-go/tools/clientcmd"
 )
 
+const (
+    timeoutToLabelAllPodsMinutes = 120
+)
+
 type patchStringValue struct {
     Op    string `json:"op"`
     Path  string `json:"path"`
@@ -50,32 +55,21 @@ func (a *AddSharedLabelsToAllPods) Run() error {
         return fmt.Errorf("error creating Kubernetes client: %w", err)
     }
 
-    ctx, cancel := context.WithTimeout(context.Background(), defaultTimeoutSeconds*time.Second)
+    ctx, cancel := contextToLabelAllPods()
     defer cancel()
 
     resources, err := clientset.CoreV1().Pods(a.Namespace).List(ctx, metav1.ListOptions{})
 
-    patch := []patchStringValue{}
-
-    for i := 0; i < a.NumSharedLabelsPerPod; i++ {
-        patch = append(patch, patchStringValue{
-            Op:    "add",
-            Path:  "/metadata/labels/shared-lab-" + fmt.Sprintf("%05d", i),
-            Value: "val",
-        })
-    }
-
-    patchBytes, err := json.Marshal(patch)
+    patchBytes, err := getSharedLabelsPatch(a.NumSharedLabelsPerPod)
     if err != nil {
-        return fmt.Errorf("error marshalling patch: %w", err)
+        return fmt.Errorf("error getting label patch: %w", err)
     }
 
     for _, resource := range resources.Items {
-        clientset.CoreV1().Pods(a.Namespace).Patch(ctx, resource.Name,
-            types.JSONPatchType,
-            patchBytes,
-            metav1.PatchOptions{},
-        )
+        err = patchLabel(ctx, clientset, a.Namespace, resource.Name, patchBytes)
+        if err != nil {
+            log.Printf("Error adding shared labels to pod %s: %s\n", resource.Name, err)
+        }
     }
 
     return nil
@@ -85,3 +79,38 @@ func (a *AddSharedLabelsToAllPods) Run() error {
 func (a *AddSharedLabelsToAllPods) Stop() error {
     return nil
 }
+
+func patchLabel(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName string, patchBytes []byte) error {
+    log.Println("Labeling Pod", podName)
+    _, err := clientset.CoreV1().Pods(namespace).Patch(ctx, podName,
+        types.JSONPatchType,
+        patchBytes,
+        metav1.PatchOptions{},
+    )
+    if err != nil {
+        return fmt.Errorf("error patching pod: %w", err)
+    }
+
+    return nil
+}
+
+func getSharedLabelsPatch(numLabels int) ([]byte, error) {
+    patch := []patchStringValue{}
+    for i := 0; i < numLabels; i++ {
+        patch = append(patch, patchStringValue{
+            Op:    "add",
+            Path:  "/metadata/labels/shared-lab-" + fmt.Sprintf("%05d", i),
+            Value: "val",
+        })
+    }
+    b, err := json.Marshal(patch)
+    if err != nil {
+        return nil, fmt.Errorf("error marshalling patch: %w", err)
+    }
+
+    return b, nil
+}
+
+func contextToLabelAllPods() (context.Context, context.CancelFunc) {
+    return context.WithTimeout(context.Background(), timeoutToLabelAllPodsMinutes*time.Minute)
+}
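Every pod now receives the same marshalled JSON Patch, built once up front, and a failed patch is logged rather than aborting the pass, presumably so pods that churn mid-loop don't fail the whole step. What the payload looks like on the wire, as a sketch:

    // Printing getSharedLabelsPatch(2) emits (formatted here for readability):
    //   [{"op":"add","path":"/metadata/labels/shared-lab-00000","value":"val"},
    //    {"op":"add","path":"/metadata/labels/shared-lab-00001","value":"val"}]
    patchBytes, err := getSharedLabelsPatch(2)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(patchBytes))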

test/e2e/framework/scaletest/add-unique-labels.go (+26 -21)

@@ -1,13 +1,10 @@
 package scaletest
 
 import (
-    "context"
     "encoding/json"
     "fmt"
-    "time"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/tools/clientcmd"
 )
@@ -44,35 +41,23 @@ func (a *AddUniqueLabelsToAllPods) Run() error {
         return fmt.Errorf("error creating Kubernetes client: %w", err)
     }
 
-    ctx, cancel := context.WithTimeout(context.Background(), defaultTimeoutSeconds*time.Second)
+    ctx, cancel := contextToLabelAllPods()
     defer cancel()
 
     resources, err := clientset.CoreV1().Pods(a.Namespace).List(ctx, metav1.ListOptions{})
 
    count := 0
 
     for _, resource := range resources.Items {
-        patch := []patchStringValue{}
-
-        for i := 0; i < a.NumUniqueLabelsPerPod; i++ {
-            patch = append(patch, patchStringValue{
-                Op:    "add",
-                Path:  "/metadata/labels/uni-lab-" + fmt.Sprintf("%05d", count),
-                Value: "val",
-            })
-            count++
+        patchBytes, err := getUniqueLabelsPatch(a.NumUniqueLabelsPerPod, &count)
+        if err != nil {
+            return fmt.Errorf("error getting label patch: %w", err)
         }
 
-        patchBytes, err := json.Marshal(patch)
+        err = patchLabel(ctx, clientset, a.Namespace, resource.Name, patchBytes)
         if err != nil {
-            return fmt.Errorf("error marshalling patch: %w", err)
+            return fmt.Errorf("error adding unique label to pod: %w", err)
        }
-
-        clientset.CoreV1().Pods(a.Namespace).Patch(ctx, resource.Name,
-            types.JSONPatchType,
-            patchBytes,
-            metav1.PatchOptions{},
-        )
     }
 
     return nil
@@ -82,3 +67,23 @@ func (a *AddUniqueLabelsToAllPods) Run() error {
 func (a *AddUniqueLabelsToAllPods) Stop() error {
     return nil
 }
+
+func getUniqueLabelsPatch(numLabels int, counter *int) ([]byte, error) {
+    patch := []patchStringValue{}
+
+    for i := 0; i < numLabels; i++ {
+        patch = append(patch, patchStringValue{
+            Op:    "add",
+            Path:  "/metadata/labels/uni-lab-" + fmt.Sprintf("%05d", *counter),
+            Value: "val",
+        })
+        (*counter)++
+    }
+
+    b, err := json.Marshal(patch)
+    if err != nil {
+        return nil, fmt.Errorf("error marshalling patch: %w", err)
+    }
+
+    return b, nil
+}
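Unlike the shared-label helper, the counter is threaded through a pointer so numbering continues across pods, keeping every label key unique within the namespace. A quick usage sketch:

    // Two pods, two unique labels each: the shared counter keeps the sequence going.
    count := 0
    first, _ := getUniqueLabelsPatch(2, &count)  // uni-lab-00000, uni-lab-00001
    second, _ := getUniqueLabelsPatch(2, &count) // uni-lab-00002, uni-lab-00003
    _, _ = first, second                         // each would be sent via patchLabel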
