Commit

fix e2e
skonto committed Sep 18, 2024
1 parent 0ec66a7 commit 14e7754
Showing 2 changed files with 18 additions and 5 deletions.
2 changes: 1 addition & 1 deletion test/e2e-tests.sh
@@ -92,7 +92,7 @@ echo ">> Uploading e2e test images..."
 ko resolve --jobs=4 -RBf ./test/test_images/metrics-test > /dev/null
 
 kubectl apply -f ./test/resources -n serving-tests
-go_test_e2e -timeout=20m -tags=e2e ./test/e2e "${E2E_TEST_FLAGS[@]}" || failed=1
+go_test_e2e -timeout=30m -tags=e2e ./test/e2e "${E2E_TEST_FLAGS[@]}" || failed=1
 
 (( failed )) && fail_test
 
21 changes: 17 additions & 4 deletions test/e2e/autoscale_custom_test.go
@@ -105,7 +105,6 @@ func TestScaleToZero(t *testing.T) {
 
 	// Set the scale metric to 20, which should create 20/target=4 pods
 	ctxScale.names.URL.RawQuery = "scale=20"
-	t.Logf("URL: %v", ctxScale.names.URL)
 	if _, err := pkgTest.CheckEndpointState(
 		context.Background(),
 		ctxScale.clients.KubeClient,
@@ -120,7 +119,7 @@ func TestScaleToZero(t *testing.T) {
 	}
 
 	// Waiting until HPA status is available, as it takes some time until HPA starts collecting metrics.
-	if err := waitForHPAState(t, ctx.resources.Revision.Name, ctx.resources.Revision.Namespace, ctx.clients); err != nil {
+	if err := waitForHPAReplicas(t, ctx.resources.Revision.Name, ctx.resources.Revision.Namespace, ctx.clients); err != nil {
 		t.Fatalf("Error collecting metrics by HPA: %v", err)
 	}
 
@@ -160,7 +159,7 @@ func TestScaleToZero(t *testing.T) {
 
 	// Waiting until HPA status is available, as it takes some time until HPA starts collecting metrics again after scale to zero.
 	// Keda de-activates the HPA if metrics is zero, so we need to wait for it to be active again.
-	if err := waitForHPAState(t, ctx.resources.Revision.Name, ctx.resources.Revision.Namespace, ctx.clients); err != nil {
+	if err := waitForHPAReplicas(t, ctx.resources.Revision.Name, ctx.resources.Revision.Namespace, ctx.clients); err != nil {
 		t.Fatalf("Error collecting metrics by HPA: %v", err)
 	}
 	assertAutoscaleUpToNumPods(ctx, targetPods*2, time.After(scaleUpTimeout), true /* quick */)
@@ -379,7 +378,7 @@ func assertScaleDownToN(ctx *TestContext, n int) {
 		ctx.t.Fatalf("Waiting for Pod.List to have no non-Evicted pods of %q: %v", deploymentName, err)
 	}
 
-	ctx.t.Logf("The Revision should remain ready after scaling to one.")
+	ctx.t.Logf("The Revision should remain ready after scaling to %d.", n)
 	if err := v1test.CheckRevisionState(ctx.clients.ServingClient, ctx.names.Revision, v1test.IsRevisionReady); err != nil {
 		ctx.t.Fatalf("The Revision %s did not stay Ready after scaling down to one: %v", ctx.names.Revision, err)
 	}
@@ -414,6 +413,20 @@ func waitForScaleToOne(t *testing.T, deploymentName string, clients *test.Clients
 	)
 }
 
+func waitForHPAReplicas(t *testing.T, name, namespace string, clients *test.Clients) error {
+	return wait.PollUntilContextTimeout(context.Background(), time.Second, 15*time.Minute, true, func(context.Context) (bool, error) {
+		hpa, err := clients.KubeClient.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		if hpa.Status.CurrentMetrics == nil || hpa.Status.CurrentReplicas < 1 {
+			t.Logf("Waiting for hpa.status is available: %#v", hpa.Status)
+			return false, nil
+		}
+		return true, nil
+	})
+}
+
 func waitForHPAState(t *testing.T, name, namespace string, clients *test.Clients) error {
 	return wait.PollUntilContextTimeout(context.Background(), time.Second, 15*time.Minute, true, func(context.Context) (bool, error) {
 		hpa, err := clients.KubeClient.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.Background(), name, metav1.GetOptions{})
