package test

import (
	"context"
	"os/exec"
	"stagger/pkg/blocker"
	"stagger/pkg/controller"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

var _ = Describe("Happy Case Scenario", func() {
	Context("When creating a deployment", func() {
		It("should create deployment and verify pacing", func() {
			// Create a Deployment with the StaggerGroup label
			deploymentName := "test-deployment"

			k8sClient, err := client.New(testEnv.Config, client.Options{})
			Expect(err).ToNot(HaveOccurred())

			replicas := int32(10)

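			// The pod template below carries the stagger enable label and a readiness
			// probe that only passes once /tmp/ready exists, so the test decides
			// exactly when each pod becomes ready (see makePodReady).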
			logger.Info("Creating the Deployment", "name", deploymentName, "namespace", Namespace)
			deployment := &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name:      deploymentName,
					Namespace: Namespace,
				},
				Spec: appsv1.DeploymentSpec{
					Strategy: appsv1.DeploymentStrategy{
						Type: appsv1.RollingUpdateDeploymentStrategyType,
					},
					Replicas: &replicas,
					Selector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							"app": "test-app",
						},
					},
					Template: corev1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{
								"app":                         "test-app",
								controller.DefaultEnableLabel: "1",
							},
						},
						Spec: corev1.PodSpec{
							Containers: []corev1.Container{
								{
									Name:    "busybox",
									Image:   "busybox",
									Command: []string{"sleep", "3600"},
									ReadinessProbe: &corev1.Probe{
										ProbeHandler: corev1.ProbeHandler{
											Exec: &corev1.ExecAction{
												Command: []string{
													// check if the file /tmp/ready exists
													"test", "-f", "/tmp/ready",
												},
											},
										},
									},
								},
							},
						},
					},
				},
			}

			By("Creating the Deployment")
			ctx := context.Background()
			err = k8sClient.Create(ctx, deployment)
			Expect(err).ToNot(HaveOccurred())

			labels := map[string]string{"app": "test-app"}

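			// The steps below assert the expected pacing: pods are unblocked in
			// growing batches (1, 1, 2, 4, then the remaining 2), each new batch
			// being released once the previous one reports ready.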
			// Step 1: 0 ready, 1 starting, 9 blocked
			starting := waitForPodsConditionAndReturnStartingPods(
				ctx,
				k8sClient,
				Namespace,
				labels,
				0,                    // expectedReady
				1,                    // expectedStarting
				9,                    // expectedBlocked
				"busybox",            // containerName
				time.Minute,          // timeout
				500*time.Millisecond, // interval
				"All pods must be pending, except 1, which should be starting", // description
			)

			// Make the starting pod ready
			makePodsReady(ctx, starting)

			// Step 2: 1 ready, 1 starting, 8 blocked
			starting = waitForPodsConditionAndReturnStartingPods(
				ctx,
				k8sClient,
				Namespace,
				labels,
				1,                    // expectedReady
				1,                    // expectedStarting
				8,                    // expectedBlocked
				"busybox",            // containerName
				time.Minute,          // timeout
				500*time.Millisecond, // interval
				"1 pod should be ready, 1 starting, 8 blocked", // description
			)

			// Make the starting pods ready
			makePodsReady(ctx, starting)

			// Step 3: 2 ready, 2 starting, 6 blocked
			starting = waitForPodsConditionAndReturnStartingPods(
				ctx,
				k8sClient,
				Namespace,
				labels,
				2,                    // expectedReady
				2,                    // expectedStarting
				6,                    // expectedBlocked
				"busybox",            // containerName
				time.Minute,          // timeout
				500*time.Millisecond, // interval
				"2 pods should be ready, 2 starting, 6 blocked", // description
			)

			// Make the starting pods ready
			makePodsReady(ctx, starting)

			// Step 4: 4 ready, 4 starting, 2 blocked
			starting = waitForPodsConditionAndReturnStartingPods(
				ctx,
				k8sClient,
				Namespace,
				labels,
				4,                    // expectedReady
				4,                    // expectedStarting
				2,                    // expectedBlocked
				"busybox",            // containerName
				time.Minute,          // timeout
				500*time.Millisecond, // interval
				"4 pods should be ready, 4 starting, 2 blocked", // description
			)

			// Make the starting pods ready
			makePodsReady(ctx, starting)

			// Step 5: 8 ready, 2 starting, 0 blocked
			starting = waitForPodsConditionAndReturnStartingPods(
				ctx,
				k8sClient,
				Namespace,
				labels,
				8,                    // expectedReady
				2,                    // expectedStarting
				0,                    // expectedBlocked
				"busybox",            // containerName
				time.Minute,          // timeout
				500*time.Millisecond, // interval
				"8 pods should be ready, 2 starting, 0 blocked", // description
			)

			makePodsReady(ctx, starting)

			// Step 6: 10 ready, 0 starting, 0 blocked
			waitForPodsConditionAndReturnStartingPods(
				ctx,
				k8sClient,
				Namespace,
				labels,
				10,                   // expectedReady
				0,                    // expectedStarting
				0,                    // expectedBlocked
				"busybox",            // containerName
				time.Minute,          // timeout
				500*time.Millisecond, // interval
				"10 pods should be ready", // description
			)
		})
	})
})

// getPodCounts lists the Pods matching the label selector and partitions them
// into ready, starting, and blocked pods.
func getPodCounts(ctx context.Context, c client.Client, namespace string, labelSelector map[string]string) (ready, starting, blocked []corev1.Pod, err error) {
	listOpts := []client.ListOption{
		client.InNamespace(namespace),
		client.MatchingLabels(labelSelector),
	}

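	// Use the project's node-selector blocker to detect pods that the stagger
	// controller is currently holding back from scheduling.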
	blocker := blocker.NewNodeSelectorPodBlocker()

	pods := &corev1.PodList{}
	err = c.List(ctx, pods, listOpts...)
	if err != nil {
		logger.Error(err, "Failed to list pods", "namespace", namespace, "labelSelector", labelSelector)
		return ready, starting, blocked, err
	}

	if len(pods.Items) == 0 {
		logger.Info("No pods matched the labelSelector", "namespace", namespace, "labelSelector", labelSelector)
		return ready, starting, blocked, nil
	}

	for _, pod := range pods.Items {
		if isPodReady(&pod) {
			ready = append(ready, pod)
		} else if blocker.IsBlocked(&pod.Spec) {
			blocked = append(blocked, pod)
		} else {
			starting = append(starting, pod)
		}
	}

	return ready, starting, blocked, nil
}

// waitForPodsConditionAndReturnStartingPods waits until the pods match the expected ready, starting and blocked counts.
// It also checks if the specified container in the starting pods has started.
func waitForPodsConditionAndReturnStartingPods(
	ctx context.Context,
	k8sClient client.Client,
	namespace string,
	labels map[string]string,
	expectedReady int,
	expectedStarting int,
	expectedBlocked int,
	containerName string,
	timeout time.Duration,
	interval time.Duration,
	description string,
) (startingPods []corev1.Pod) {
	By(description)
	Eventually(func() bool {
		ready, starting, blocked, err := getPodCounts(ctx, k8sClient, namespace, labels)
		Expect(err).ToNot(HaveOccurred())
		if len(ready) != expectedReady || len(starting) != expectedStarting || len(blocked) != expectedBlocked {
			return false
		}
		if expectedStarting > 0 {
			if isContainerStarted(containerName, starting...) {
				startingPods = starting
				return true
			}
			return false
		}
		return true
	}, timeout, interval).Should(BeTrue())

	return startingPods
}

// makePodsReady marks each pod in the provided list as ready.
func makePodsReady(ctx context.Context, pods []corev1.Pod) {
	for _, pod := range pods {
		makePodReady(ctx, &pod)
	}
}

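// isContainerStarted reports whether the container with the given name has
// started in every one of the given pods.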
func isContainerStarted(containerName string, pod ...corev1.Pod) bool {
	started := 0
	for _, p := range pod {
		for _, status := range p.Status.ContainerStatuses {
			if status.Name == containerName && status.Started != nil && *status.Started {
				started++
			}
		}
	}

	return started == len(pod)
}

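// isPodReady reports whether the pod's PodReady condition is true.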
func isPodReady(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

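// makePodReady satisfies the pod's readiness probe by exec'ing into it with
// kubectl and touching /tmp/ready.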
func makePodReady(ctx context.Context, pod *corev1.Pod) {
	execCommand := exec.CommandContext(ctx, "kubectl", "--kubeconfig", kubeConfigPath, "exec", "-n", pod.Namespace, pod.Name, "--", "touch", "/tmp/ready")
	out, err := execCommand.CombinedOutput()
	Expect(err).ToNot(HaveOccurred(), "Output: %s", out)
}