Skip to content

Commit

Permalink
add hpa test and try to fix controller test
Browse files Browse the repository at this point in the history
  • Loading branch information
randytqwjp committed Dec 3, 2024
1 parent b47578c commit 7d07efd
Show file tree
Hide file tree
Showing 4 changed files with 288 additions and 17 deletions.
15 changes: 3 additions & 12 deletions internal/controller/tortoise_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -185,25 +185,16 @@ func (r *TortoiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
return ctrl.Result{}, err
}
tortoise = tortoiseService.UpdateTortoiseAutoscalingPolicyInStatus(tortoise, hpa, now)
scalingActive := r.HpaService.checkHpaMetricStatus(ctx, hpa)
if scalingActive == false && tortoise.Spec.UpdateMode == autoscalingv1beta3.UpdateModeAuto {
//switch to emergency mode
scalingActive := r.HpaService.CheckHpaMetricStatus(ctx, hpa)
if scalingActive == false && tortoise.Spec.UpdateMode == autoscalingv1beta3.UpdateModeAuto && tortoise.Status.TortoisePhase == autoscalingv1beta3.TortoisePhaseWorking {
//switch to emergency mode only when auto tortoise and already working
tortoise.Spec.UpdateMode = autoscalingv1beta3.UpdateModeEmergency
}
if scalingActive == true && tortoise.Spec.UpdateMode == autoscalingv1beta3.UpdateModeEmergency {
tortoise.Spec.UpdateMode = autoscalingv1beta3.UpdateModeAuto
}

tortoise = r.TortoiseService.UpdateTortoisePhase(tortoise, now)
if tortoise.Status.TortoisePhase == autoscalingv1beta3.TortoisePhaseInitializing {
logger.Info("initializing tortoise", "tortoise", req.NamespacedName)
// need to initialize HPA and VPA.
if err := r.initializeVPAAndHPA(ctx, tortoise, currentDesiredReplicaNum, now); err != nil {
return ctrl.Result{}, fmt.Errorf("initialize VPA and HPA: %w", err)
}

return ctrl.Result{RequeueAfter: r.Interval}, nil
}

// Make sure finalizer is added to tortoise.
tortoise, err = r.TortoiseService.AddFinalizer(ctx, tortoise)
Expand Down
20 changes: 19 additions & 1 deletion internal/controller/tortoise_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,25 @@ func updateResourcesInTestCaseFile(path string, resource resources) error {
}

// removeUnnecessaryFieldsFromHPA normalizes an HPA fetched from the API server
// so it can be compared against golden test fixtures: the volatile parts of
// Status are replaced with a fixed, healthy-looking set of conditions
// (AbleToScale=True, ScalingActive=True, ScalingLimited=False) that the
// controller's metric-health check treats as "scaling active".
// The HPA is mutated in place and returned for call-chaining.
func removeUnnecessaryFieldsFromHPA(hpa *v2.HorizontalPodAutoscaler) *v2.HorizontalPodAutoscaler {
	hpa.Status = v2.HorizontalPodAutoscalerStatus{
		Conditions: []v2.HorizontalPodAutoscalerCondition{
			{
				Status:  "True",
				Type:    v2.AbleToScale,
				Message: "recommended size matches current size",
			},
			{
				Status:  "True",
				Type:    v2.ScalingActive,
				Message: "the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request)",
			},
			{
				Status:  "False",
				Type:    v2.ScalingLimited,
				Message: "the desired count is within the acceptable range",
			},
		},
	}
	return hpa
}

Expand Down
31 changes: 27 additions & 4 deletions pkg/hpa/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -766,13 +766,36 @@ func (c *Service) excludeExternalMetric(ctx context.Context, hpa *v2.HorizontalP
return newHPA
}

func (c *Service) checkHpaMetricStatus(ctx context.Context, currenthpa *v2.HorizontalPodAutoscaler, now time.Time) bool {
func (c *Service) CheckHpaMetricStatus(ctx context.Context, currenthpa *v2.HorizontalPodAutoscaler) bool {
currenthpa = currenthpa.DeepCopy()
conditions := currenthpa.Status.Conditions
conditions := []v2.HorizontalPodAutoscalerCondition{}
if currenthpa.Status.Conditions == nil {
return false
}

if conditions[1].Type == "ScalingActive" && conditions[1].Status == "False" && conditions[1].Reason == "FailedGetResourceMetric" {
//switch to Emergency mode since no metrics
if currenthpa.Status.CurrentMetrics == nil {
return false
}
conditions = currenthpa.Status.Conditions
currentMetrics := currenthpa.Status.CurrentMetrics

if len(conditions) > 0 {
// do what you want
for _, condition := range conditions {
if condition.Type == "ScalingActive" && condition.Status == "False" && condition.Reason == "FailedGetResourceMetric" {
//switch to Emergency mode since no metrics
return false
}
}
}

if len(currentMetrics) > 0 {
for _, currentMetric := range currentMetrics {
if currentMetric.ContainerResource.Current.Value.IsZero() {
return false
}
}
}

return true
}
239 changes: 239 additions & 0 deletions pkg/hpa/service_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
v2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
Expand Down Expand Up @@ -4666,3 +4667,241 @@ func TestService_UpdateHPASpecFromTortoiseAutoscalingPolicy(t *testing.T) {
})
}
}

// TestService_CheckHpaMetricStatus verifies that CheckHpaMetricStatus reports
// false whenever the HPA cannot (or does not) scale on live metric data.
func TestService_CheckHpaMetricStatus(t *testing.T) {
	tests := []struct {
		name   string
		HPA    *v2.HorizontalPodAutoscaler
		result bool
	}{
		{
			// ScalingActive=False and all current readings are zero:
			// the metric pipeline is down.
			name: "metric server down, should return false",
			HPA: &v2.HorizontalPodAutoscaler{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "existing-hpa",
					Namespace: "default",
				},
				Status: v2.HorizontalPodAutoscalerStatus{
					Conditions: []v2.HorizontalPodAutoscalerCondition{
						{
							Status:  "True",
							Type:    v2.AbleToScale,
							Message: "recommended size matches current size",
						},
						{
							Status:  "False",
							Type:    v2.ScalingActive,
							Message: "the HPA was unable to compute the replica count: failed to get cpu utilization",
						},
						{
							Status:  "False",
							Type:    v2.ScalingLimited,
							Message: "the desired count is within the acceptable range",
						},
					},
					CurrentMetrics: []v2.MetricStatus{
						{
							Type: "ContainerResource",
							ContainerResource: &v2.ContainerResourceMetricStatus{
								Container: "app",
								Current: v2.MetricValueStatus{
									AverageUtilization: ptr.To[int32](0),
									AverageValue:       resource.NewQuantity(0, resource.DecimalSI),
									Value:              resource.NewQuantity(0, resource.DecimalSI),
								},
								Name: v1.ResourceCPU,
							},
						},
						{
							Type: "ContainerResource",
							ContainerResource: &v2.ContainerResourceMetricStatus{
								Container: "istio-proxy",
								Current: v2.MetricValueStatus{
									AverageUtilization: ptr.To[int32](0),
									AverageValue:       resource.NewQuantity(0, resource.DecimalSI),
									Value:              resource.NewQuantity(0, resource.DecimalSI),
								},
								Name: v1.ResourceCPU,
							},
						},
					},
				},
				Spec: v2.HorizontalPodAutoscalerSpec{
					MinReplicas: ptrInt32(3),
					MaxReplicas: 1000,
					Metrics: []v2.MetricSpec{
						{
							Type: v2.ContainerResourceMetricSourceType,
							ContainerResource: &v2.ContainerResourceMetricSource{
								Name:      v1.ResourceCPU,
								Container: "app",
								Target: v2.MetricTarget{
									AverageUtilization: ptr.To[int32](70),
									Type:               v2.UtilizationMetricType,
								},
							},
						},
						{
							Type: v2.ContainerResourceMetricSourceType,
							ContainerResource: &v2.ContainerResourceMetricSource{
								Name:      v1.ResourceCPU,
								Container: "istio-proxy",
								Target: v2.MetricTarget{
									AverageUtilization: ptr.To[int32](70),
									Type:               v2.UtilizationMetricType,
								},
							},
						},
					},
					ScaleTargetRef: v2.CrossVersionObjectReference{
						Kind:       "Deployment",
						Name:       "deployment",
						APIVersion: "apps/v1",
					},
					Behavior: &v2.HorizontalPodAutoscalerBehavior{
						ScaleUp: &v2.HPAScalingRules{
							Policies: []v2.HPAScalingPolicy{
								{
									Type:          v2.PercentScalingPolicy,
									Value:         100,
									PeriodSeconds: 60,
								},
							},
						},
						ScaleDown: &v2.HPAScalingRules{
							Policies: []v2.HPAScalingPolicy{
								{
									Type:          v2.PercentScalingPolicy,
									Value:         2,
									PeriodSeconds: 90,
								},
							},
						},
					},
				},
			},
			result: false,
		},
		{
			// ScalingActive=True but every container-resource reading is zero:
			// the metric values themselves are missing.
			name: "Container resource metric missing",
			HPA: &v2.HorizontalPodAutoscaler{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "existing-hpa",
					Namespace: "default",
				},
				Status: v2.HorizontalPodAutoscalerStatus{
					Conditions: []v2.HorizontalPodAutoscalerCondition{
						{
							Status:  "True",
							Type:    v2.AbleToScale,
							Message: "recommended size matches current size",
						},
						{
							Status:  "True",
							Type:    v2.ScalingActive,
							Message: "the HPA was able to successfully calculate a replica count from cpu container resource utilization (percentage of request)",
						},
						{
							Status:  "False",
							Type:    v2.ScalingLimited,
							Message: "the desired count is within the acceptable range",
						},
					},
					CurrentMetrics: []v2.MetricStatus{
						{
							Type: "ContainerResource",
							ContainerResource: &v2.ContainerResourceMetricStatus{
								Container: "app",
								Current: v2.MetricValueStatus{
									AverageUtilization: ptr.To[int32](0),
									AverageValue:       resource.NewQuantity(0, resource.DecimalSI),
									Value:              resource.NewQuantity(0, resource.DecimalSI),
								},
								Name: v1.ResourceCPU,
							},
						},
						{
							Type: "ContainerResource",
							ContainerResource: &v2.ContainerResourceMetricStatus{
								Container: "istio-proxy",
								Current: v2.MetricValueStatus{
									AverageUtilization: ptr.To[int32](0),
									AverageValue:       resource.NewQuantity(0, resource.DecimalSI),
									Value:              resource.NewQuantity(0, resource.DecimalSI),
								},
								Name: v1.ResourceCPU,
							},
						},
					},
				},
				Spec: v2.HorizontalPodAutoscalerSpec{
					MinReplicas: ptrInt32(3),
					MaxReplicas: 1000,
					Metrics: []v2.MetricSpec{
						{
							Type: v2.ContainerResourceMetricSourceType,
							ContainerResource: &v2.ContainerResourceMetricSource{
								Name:      v1.ResourceCPU,
								Container: "app",
								Target: v2.MetricTarget{
									AverageUtilization: ptr.To[int32](70),
									Type:               v2.UtilizationMetricType,
								},
							},
						},
						{
							Type: v2.ContainerResourceMetricSourceType,
							ContainerResource: &v2.ContainerResourceMetricSource{
								Name:      v1.ResourceCPU,
								Container: "istio-proxy",
								Target: v2.MetricTarget{
									AverageUtilization: ptr.To[int32](70),
									Type:               v2.UtilizationMetricType,
								},
							},
						},
					},
					ScaleTargetRef: v2.CrossVersionObjectReference{
						Kind:       "Deployment",
						Name:       "deployment",
						APIVersion: "apps/v1",
					},
					Behavior: &v2.HorizontalPodAutoscalerBehavior{
						ScaleUp: &v2.HPAScalingRules{
							Policies: []v2.HPAScalingPolicy{
								{
									Type:          v2.PercentScalingPolicy,
									Value:         100,
									PeriodSeconds: 60,
								},
							},
						},
						ScaleDown: &v2.HPAScalingRules{
							Policies: []v2.HPAScalingPolicy{
								{
									Type:          v2.PercentScalingPolicy,
									Value:         2,
									PeriodSeconds: 90,
								},
							},
						},
					},
				},
			},
			result: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, err := New(fake.NewClientBuilder().Build(), record.NewFakeRecorder(10), 0.95, 90, 100, time.Hour, 100, 1000, 3, "")
			if err != nil {
				t.Fatalf("New() error = %v", err)
			}
			status := c.CheckHpaMetricStatus(context.Background(), tt.HPA)
			if status != tt.result {
				// Report got/want so a failure is diagnosable from the log alone.
				t.Errorf("CheckHpaMetricStatus() = %v, want %v", status, tt.result)
			}
		})
	}
}

0 comments on commit 7d07efd

Please sign in to comment.