Merge pull request #109058 from oliviermichaelis/calculate-start-replicas

Fix replica calculation at start of HPA scaling policy period
Commit da6d8c997e by Kubernetes Prow Robot, 2022-08-30 10:58:55 -07:00, committed by GitHub.
2 changed files with 36 additions and 10 deletions
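
Background on the fix: each rate-limit calculation previously rebuilt the replica count at the start of a policy period from only one event slice — the scale-up limit subtracted only replicas added during the period, and the scale-down limit added back only replicas deleted. When scale-ups and scale-downs both occur within one period, that reconstructed period-start count is wrong, so the policy limit is computed from the wrong base. This change passes both event slices to both functions, making the period start currentReplicas - replicasAdded + replicasDeleted in both directions.

A minimal, self-contained sketch of the corrected arithmetic (simplified types: the real controller timestamps events with time.Time and marks stale ones outdated, rather than tracking ages):

package main

import "fmt"

// timestampedScaleEvent loosely mirrors the controller's internal type;
// replicaChange holds the absolute size of the event, and the direction
// is implied by which slice the event is stored in.
type timestampedScaleEvent struct {
	replicaChange int32 // replicas added or removed by one scale event
	ageSeconds    int32 // how long ago the event happened
}

// replicasChangePerPeriod sums the events that fall inside the policy
// period, standing in for the upstream getReplicasChangePerPeriod.
func replicasChangePerPeriod(periodSeconds int32, events []timestampedScaleEvent) int32 {
	var total int32
	for _, e := range events {
		if e.ageSeconds <= periodSeconds {
			total += e.replicaChange
		}
	}
	return total
}

func main() {
	// Within a 300s period the HPA scaled up by 4 and down by 2, and now
	// runs 6 replicas (the numbers of the new scale-up test case below).
	scaleUp := []timestampedScaleEvent{{replicaChange: 4, ageSeconds: 120}}
	scaleDown := []timestampedScaleEvent{{replicaChange: 2, ageSeconds: 120}}
	current := int32(6)

	added := replicasChangePerPeriod(300, scaleUp)
	deleted := replicasChangePerPeriod(300, scaleDown)

	// Old calculation: scale-down events ignored, so the period was assumed
	// to have started at 6 - 4 = 2 replicas, and a 300% policy caps at 2*4 = 8.
	fmt.Println("old periodStartReplicas:", current-added)
	// Fixed calculation: 6 - 4 + 2 = 4 replicas, so a 300% policy caps at 4*4 = 16.
	fmt.Println("new periodStartReplicas:", current-added+deleted)
}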

pkg/controller/podautoscaler/horizontal.go

@@ -929,7 +929,7 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
 var possibleLimitingReason, possibleLimitingMessage string
 if args.DesiredReplicas > args.CurrentReplicas {
-scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], args.ScaleUpBehavior)
+scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleUpBehavior)
 if scaleUpLimit < args.CurrentReplicas {
 // We shouldn't scale up further until the scaleUpEvents are cleaned up
 scaleUpLimit = args.CurrentReplicas
@@ -947,7 +947,7 @@ func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args Norma
 return maximumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
 }
 } else if args.DesiredReplicas < args.CurrentReplicas {
-scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
+scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
 if scaleDownLimit > args.CurrentReplicas {
 // We shouldn't scale down further until the scaleDownEvents are cleaned up
 scaleDownLimit = args.CurrentReplicas
@@ -1032,7 +1032,7 @@ func getLongestPolicyPeriod(scalingRules *autoscalingv2.HPAScalingRules) int32 {
 }
 // calculateScaleUpLimitWithScalingRules returns the maximum number of pods that could be added for the given HPAScalingRules
-func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
+func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
 var result int32
 var proposed int32
 var selectPolicyFn func(int32, int32) int32
@@ -1046,8 +1046,9 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 selectPolicyFn = max // Use the default policy otherwise to produce the highest possible change
 }
 for _, policy := range scalingRules.Policies {
-replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
-periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
+replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
+replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
+periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
 if policy.Type == autoscalingv2.PodsScalingPolicy {
 proposed = periodStartReplicas + policy.Value
 } else if policy.Type == autoscalingv2.PercentScalingPolicy {
@@ -1060,7 +1061,7 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
 }
 // calculateScaleDownLimitWithBehaviors returns the maximum number of pods that could be deleted for the given HPAScalingRules
-func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
+func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
 var result int32
 var proposed int32
 var selectPolicyFn func(int32, int32) int32
@@ -1074,8 +1075,9 @@ func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []t
 selectPolicyFn = min // Use the default policy otherwise to produce the highest possible change
 }
 for _, policy := range scalingRules.Policies {
-replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
-periodStartReplicas := currentReplicas + replicasDeletedInCurrentPeriod
+replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
+replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
+periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
 if policy.Type == autoscalingv2.PodsScalingPolicy {
 proposed = periodStartReplicas - policy.Value
 } else if policy.Type == autoscalingv2.PercentScalingPolicy {
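
Both limit functions now reconstruct the period start identically; only the final projection differs by direction. For reference, the percent branches elided from the hunks above project from that base roughly as follows (per the surrounding upstream code: rounding up on the way up, truncating on the way down):

// scale up: ceil(periodStart * (1 + percent/100))
proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100)))
// scale down: trunc(periodStart * (1 - percent/100))
proposed = int32(float64(periodStartReplicas) * (1 - float64(policy.Value)/100))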

pkg/controller/podautoscaler/horizontal_test.go

@@ -3029,7 +3029,7 @@ func TestConvertDesiredReplicasWithRules(t *testing.T) {
 func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
 policy := autoscalingv2.MinChangePolicySelect
-calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+calculated := calculateScaleUpLimitWithScalingRules(1, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
 StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
 SelectPolicy: &policy,
 Policies: []autoscalingv2.HPAScalingPolicy{
@@ -3051,7 +3051,7 @@ func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
 func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
 policy := autoscalingv2.MinChangePolicySelect
-calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
+calculated := calculateScaleDownLimitWithBehaviors(5, []timestampedScaleEvent{}, []timestampedScaleEvent{}, &autoscalingv2.HPAScalingRules{
 StabilizationWindowSeconds: utilpointer.Int32Ptr(300),
 SelectPolicy: &policy,
 Policies: []autoscalingv2.HPAScalingPolicy{
@@ -3485,6 +3485,18 @@ func TestScalingWithRules(t *testing.T) {
 expectedReplicas: 255, // (100 - 15) + 200%
 expectedCondition: "ScaleUpLimit",
 },
+{
+name: "scaleUp with percent policy and previous scale up and down events",
+scaleUpEvents: generateEventsUniformDistribution([]int{4}, 120),
+scaleDownEvents: generateEventsUniformDistribution([]int{2}, 120),
+specMinReplicas: 1,
+specMaxReplicas: 1000,
+scaleUpRules: generateScalingRules(0, 0, 300, 300, 0),
+currentReplicas: 6,
+prenormalizedDesiredReplicas: 24,
+expectedReplicas: 16,
+expectedCondition: "ScaleUpLimit",
+},
 // ScaleDown with PeriodSeconds usage
 {
 name: "scaleDown with default policy and previous events",
@@ -3551,6 +3563,18 @@ func TestScalingWithRules(t *testing.T) {
 expectedReplicas: 56, // (100 + 12) - 50%
 expectedCondition: "ScaleDownLimit",
 },
+{
+name: "scaleDown with percent policy and previous scale up and down events",
+scaleUpEvents: generateEventsUniformDistribution([]int{2}, 120),
+scaleDownEvents: generateEventsUniformDistribution([]int{4}, 120),
+specMinReplicas: 1,
+specMaxReplicas: 1000,
+scaleDownRules: generateScalingRules(0, 0, 50, 180, 0),
+currentReplicas: 10,
+prenormalizedDesiredReplicas: 1,
+expectedReplicas: 6,
+expectedCondition: "ScaleDownLimit",
+},
 {
 // corner case for calculating the scaleDownLimit, when we changed the pod or percent policy after a lot of scaleDown events
 // in this case we shouldn't allow scale down, even though the naive formula suggests that scaleDownLimit is more than CurrentReplicas (100+30-10% > 100)
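
Likewise for the new scaleDown case: the period start is 10 - 2 + 4 = 12 replicas, and a 50% policy over 180s allows scaling down to 12 * (1 - 50/100) = 6, so the desired 1 replica is clamped up to the expected 6. The old formula would have ignored the 2 added replicas, started from 10 + 4 = 14, and stopped the scale-down at 7.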