GetObjectMetricReplicas ignores unready pods

Previously, when `GetObjectMetricReplicas` calculated the desired
replica count, it multiplied the usage ratio by the current number of replicas.
This caused over-scaling when some pods stayed unready for a long time.
For example, if pods A, B, and C existed but only pod A was ready,
and the usage ratio was 500%, we would previously request 15 desired
replicas, even though only one pod was actually handling the load.

After this change, `GetObjectMetricReplicas` multiplies the usage
ratio by the number of ready pods. In the example above, we would now
desire only 5 replicas.
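
To make the difference concrete, here is a minimal, hypothetical Go sketch of the
two calculations. The names `desiredReplicasOld` and `desiredReplicasNew` are
illustrative only and do not exist in the replica calculator; the real code also
rounds up and applies min/max replica bounds.

// Illustrative sketch only -- not the actual replica calculator code.
package main

import "fmt"

// desiredReplicasOld mirrors the old behavior: scale by every current replica,
// ready or not.
func desiredReplicasOld(usageRatio float64, currentReplicas int32) int32 {
	return int32(usageRatio * float64(currentReplicas))
}

// desiredReplicasNew mirrors the new behavior: scale only by ready replicas.
func desiredReplicasNew(usageRatio float64, readyReplicas int32) int32 {
	return int32(usageRatio * float64(readyReplicas))
}

func main() {
	// Pods A, B, and C exist, but only A is ready; usage is at 500% of target.
	usageRatio := 5.0
	fmt.Println(desiredReplicasOld(usageRatio, 3)) // 15 -- over-scales
	fmt.Println(desiredReplicasNew(usageRatio, 1)) // 5
}

The only input that changes is the replica count the usage ratio is multiplied by.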

This change gives `GetObjectMetricReplicas` the same behavior as the
other replica calculator methods. Only `GetExternalMetricReplicas` and
`GetExternalPerPodMetricReplicas` still allow unready pods to affect the
number of desired replicas. I will fix that issue in the following
commit.
Author: mattjmcnaughton
Date:   2018-03-06 09:51:49 -05:00
Parent: 48a7048d98
Commit: 7e3bce7b3e

4 changed files with 87 additions and 13 deletions

@@ -246,16 +246,32 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 		defer tc.Unlock()
 		obj := &v1.PodList{}
-		for i := 0; i < len(tc.reportedCPURequests); i++ {
+		specifiedCPURequests := tc.reportedCPURequests != nil
+		numPodsToCreate := int(tc.initialReplicas)
+		if specifiedCPURequests {
+			numPodsToCreate = len(tc.reportedCPURequests)
+		}
+		for i := 0; i < numPodsToCreate; i++ {
 			podReadiness := v1.ConditionTrue
 			if tc.reportedPodReadiness != nil {
 				podReadiness = tc.reportedPodReadiness[i]
 			}
 			podPhase := v1.PodRunning
 			if tc.reportedPodPhase != nil {
 				podPhase = tc.reportedPodPhase[i]
 			}
 			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
+			reportedCPURequest := resource.MustParse("1.0")
+			if specifiedCPURequests {
+				reportedCPURequest = tc.reportedCPURequests[i]
+			}
 			pod := v1.Pod{
 				Status: v1.PodStatus{
 					Phase: podPhase,
@@ -273,12 +289,13 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
"name": podNamePrefix,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: tc.reportedCPURequests[i],
v1.ResourceCPU: reportedCPURequest,
},
},
},
@@ -1348,13 +1365,14 @@ func TestEmptyMetrics(t *testing.T) {
 func TestEmptyCPURequest(t *testing.T) {
 	tc := testCase{
-		minReplicas:     1,
-		maxReplicas:     5,
-		initialReplicas: 1,
-		desiredReplicas: 1,
-		CPUTarget:       100,
-		reportedLevels:  []uint64{200},
-		useMetricsAPI:   true,
+		minReplicas:         1,
+		maxReplicas:         5,
+		initialReplicas:     1,
+		desiredReplicas:     1,
+		CPUTarget:           100,
+		reportedLevels:      []uint64{200},
+		reportedCPURequests: []resource.Quantity{},
+		useMetricsAPI:       true,
 		expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
 			{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
 			{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},