Merge pull request #29653 from ZTE-PaaS/zhangke-patch-010

Automatic merge from submit-queue

The observed usage should match the resources that have hard constraints

In the sync process the quota is replenished and the new observed usage is summed from each evaluator. If the previously recorded usage (previousUsed) is not cleared first, the new usage will be dirty: resources that no longer have hard constraints can remain in it, as in the code below:
	newUsage = quota.Mask(newUsage, matchedResources)
	for key, value := range newUsage {
		usage.Status.Used[key] = value
	}
So I think the usage object should not be seeded with previousUsed here.
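
For illustration, here is a minimal, self-contained sketch of that failure mode, with plain maps standing in for api.ResourceList (a simplification, not the real quota package):

	// Sketch of the bug: seeding Status.Used from previousUsed and then
	// overlaying the fresh observation never clears stale keys.
	package main

	import "fmt"

	func main() {
		// Usage recorded on the last sync; memory has since lost its hard constraint.
		previousUsed := map[string]string{"cpu": "0", "memory": "0"}
		// Freshly observed usage, already masked to the matched resources.
		newUsage := map[string]string{"cpu": "0"}

		// Old behavior: seed the usage object with previousUsed...
		statusUsed := map[string]string{}
		for k, v := range previousUsed {
			statusUsed[k] = v
		}
		// ...then overlay the new observation, as in the loop quoted above.
		for k, v := range newUsage {
			statusUsed[k] = v
		}

		fmt.Println(statusUsed) // map[cpu:0 memory:0] -- "memory" is dirty, never cleared
	}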

Kubernetes Submit Queue 2016-08-11 21:54:28 -07:00 committed by GitHub
commit 01aff52e0c
2 changed files with 104 additions and 0 deletions


@@ -289,6 +289,10 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQ
		used[key] = value
	}
	// ensure set of used values match those that have hard constraints
	hardResources := quota.ResourceNames(hardLimits)
	used = quota.Mask(used, hardResources)
	// Create a usage object that is based on the quota resource version that will handle updates
	// by default, we preserve the past usage observation, and set hard to the current spec
	usage := api.ResourceQuota{
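
Read together, the two added lines collect the resource names that currently carry hard constraints and mask the observed usage down to that set, so a resource dropped from spec.hard also disappears from status.used. A hedged sketch of that effect, using the values from the test below (plain maps instead of api.ResourceList; the real helpers are quota.ResourceNames and quota.Mask):

	package main

	import "fmt"

	func main() {
		hard := map[string]string{"cpu": "4"}                // spec.hard no longer lists memory
		used := map[string]string{"cpu": "0", "memory": "0"} // stale observation from the old status

		// Analogue of quota.ResourceNames(hardLimits): the keys with hard constraints.
		hardResources := map[string]bool{}
		for k := range hard {
			hardResources[k] = true
		}

		// Analogue of quota.Mask(used, hardResources): keep only those keys.
		masked := map[string]string{}
		for k, v := range used {
			if hardResources[k] {
				masked[k] = v
			}
		}

		fmt.Println(masked) // map[cpu:0] -- memory is no longer reported
	}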


@@ -242,6 +242,106 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
	}
}

func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
	resourceQuota := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Namespace: "default",
			Name:      "rq",
		},
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("0"),
				api.ResourceMemory: resource.MustParse("0"),
			},
		},
	}
	expectedUsage := api.ResourceQuota{
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(&resourceQuota)
	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
		KubeClient:   kubeClient,
		ResyncPeriod: controller.NoResyncPeriodFunc,
		Registry:     install.NewRegistry(kubeClient),
		GroupKindsToReplenish: []unversioned.GroupKind{
			api.Kind("Pod"),
			api.Kind("Service"),
			api.Kind("ReplicationController"),
			api.Kind("PersistentVolumeClaim"),
		},
		ControllerFactory:         NewReplenishmentControllerFactoryFromClient(kubeClient),
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
	}
	quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
	err := quotaController.syncResourceQuota(resourceQuota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	expectedActionSet := sets.NewString(
		strings.Join([]string{"list", "pods", ""}, "-"),
		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
	)
	actionSet := sets.NewString()
	for _, action := range kubeClient.Actions() {
		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
	}
	lastActionIndex := len(kubeClient.Actions()) - 1
	usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota)
	// ensure hard and used limits are what we expected
	for k, v := range expectedUsage.Status.Hard {
		actual := usage.Status.Hard[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	for k, v := range expectedUsage.Status.Used {
		actual := usage.Status.Used[k]
		actualValue := actual.String()
		expectedValue := v.String()
		if expectedValue != actualValue {
			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
		}
	}
	// ensure status hard and used are synced with spec hard, with no dirty resources left over
	for k, v := range usage.Status.Hard {
		if k == api.ResourceMemory {
			t.Errorf("Unexpected Usage Hard: Key: %v, Value: %v", k, v.String())
		}
	}
	for k, v := range usage.Status.Used {
		if k == api.ResourceMemory {
			t.Errorf("Unexpected Usage Used: Key: %v, Value: %v", k, v.String())
		}
	}
}

func TestSyncResourceQuotaNoChange(t *testing.T) {
	resourceQuota := api.ResourceQuota{