api: resource.k8s.io PodScheduling -> PodSchedulingContext
The name "PodScheduling" was unusual because, in contrast to most other API kind names, it was impossible to put an article in front of it. The type is now called PodSchedulingContext instead.
commit fec5233668
parent ee18f60252
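For downstream Go code the rename is mostly mechanical: the kind, its Spec/Status types, and the generated accessors all gain the `Context` suffix. Below is a minimal sketch of what a consumer of the renamed alpha API might look like; it assumes the published types live in `k8s.io/api/resource/v1alpha2`, and the object/field values are illustrative, not taken from this commit.

```go
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2" // assumed location of the published types
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Construct the renamed object; the fields mirror the internal
	// PodSchedulingContextSpec shown in the type diff below.
	schedulingCtx := &resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-pod", // conventionally named after the Pod being scheduled
			Namespace: "default",
		},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode:   "worker-1",
			PotentialNodes: []string{"worker-1", "worker-2"},
		},
	}
	fmt.Println(schedulingCtx.Name, schedulingCtx.Spec.SelectedNode)
}
```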
@@ -58,8 +58,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimList{},
&ResourceClaimTemplate{},
&ResourceClaimTemplateList{},
-&PodScheduling{},
-&PodSchedulingList{},
+&PodSchedulingContext{},
+&PodSchedulingContextList{},
)

return nil
@@ -173,27 +173,27 @@ type ResourceClaimList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

-// PodScheduling objects hold information that is needed to schedule
+// PodSchedulingContext objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
-type PodScheduling struct {
+type PodSchedulingContext struct {
metav1.TypeMeta
// Standard object metadata
// +optional
metav1.ObjectMeta

// Spec describes where resources for the Pod are needed.
-Spec PodSchedulingSpec
+Spec PodSchedulingContextSpec

// Status describes where resources for the Pod can be allocated.
-Status PodSchedulingStatus
+Status PodSchedulingContextStatus
}

-// PodSchedulingSpec describes where resources for the Pod are needed.
-type PodSchedulingSpec struct {
+// PodSchedulingContextSpec describes where resources for the Pod are needed.
+type PodSchedulingContextSpec struct {
// SelectedNode is the node for which allocation of ResourceClaims that
// are referenced by the Pod and that use "WaitForFirstConsumer"
// allocation is to be attempted.
@@ -209,8 +209,8 @@ type PodSchedulingSpec struct {
PotentialNodes []string
}

-// PodSchedulingStatus describes where resources for the Pod can be allocated.
-type PodSchedulingStatus struct {
+// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
+type PodSchedulingContextStatus struct {
// ResourceClaims describes resource availability for each
// pod.spec.resourceClaim entry where the corresponding ResourceClaim
// uses "WaitForFirstConsumer" allocation mode.
@@ -239,21 +239,21 @@ type ResourceClaimSchedulingStatus struct {
}

// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
-// node lists that are stored in PodScheduling objects. This limit is part
+// node lists that are stored in PodSchedulingContext objects. This limit is part
// of the API.
const PodSchedulingNodeListMaxSize = 128

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

-// PodSchedulingList is a collection of Pod scheduling objects.
-type PodSchedulingList struct {
+// PodSchedulingContextList is a collection of Pod scheduling objects.
+type PodSchedulingContextList struct {
metav1.TypeMeta
// Standard list metadata
// +optional
metav1.ListMeta

-// Items is the list of PodScheduling objects.
-Items []PodScheduling
+// Items is the list of PodSchedulingContext objects.
+Items []PodSchedulingContext
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
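The `PodSchedulingNodeListMaxSize` constant above is what the validation hunks that follow check `PotentialNodes` and `UnsuitableNodes` against. As a rough stand-alone illustration of that length check (this is not the actual `validateSliceIsASet` helper, which additionally rejects duplicates and invalid node names):

```go
package main

import "fmt"

// checkNodeListSize mirrors only the length part of the API validation:
// node lists stored in a PodSchedulingContext may hold at most
// PodSchedulingNodeListMaxSize (128) entries.
func checkNodeListSize(nodes []string) error {
	const podSchedulingNodeListMaxSize = 128 // mirrors resource.PodSchedulingNodeListMaxSize
	if len(nodes) > podSchedulingNodeListMaxSize {
		return fmt.Errorf("must have at most %d entries, got %d", podSchedulingNodeListMaxSize, len(nodes))
	}
	return nil
}

func main() {
	nodes := make([]string, 129)
	for i := range nodes {
		nodes[i] = fmt.Sprintf("worker%d", i)
	}
	fmt.Println(checkNodeListSize(nodes)) // prints the "at most 128 entries" error
}
```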
@@ -253,33 +253,33 @@ func validateResourceClaimConsumers(consumers []resource.ResourceClaimConsumerRe
return allErrs
}

-// ValidatePodScheduling validates a PodScheduling.
-func ValidatePodScheduling(resourceClaim *resource.PodScheduling) field.ErrorList {
-allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
-allErrs = append(allErrs, validatePodSchedulingSpec(&resourceClaim.Spec, field.NewPath("spec"))...)
+// ValidatePodSchedulingContext validates a PodSchedulingContext.
+func ValidatePodSchedulingContexts(schedulingCtx *resource.PodSchedulingContext) field.ErrorList {
+allErrs := corevalidation.ValidateObjectMeta(&schedulingCtx.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
+allErrs = append(allErrs, validatePodSchedulingSpec(&schedulingCtx.Spec, field.NewPath("spec"))...)
return allErrs
}

-func validatePodSchedulingSpec(spec *resource.PodSchedulingSpec, fldPath *field.Path) field.ErrorList {
+func validatePodSchedulingSpec(spec *resource.PodSchedulingContextSpec, fldPath *field.Path) field.ErrorList {
allErrs := validateSliceIsASet(spec.PotentialNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, fldPath.Child("potentialNodes"))
return allErrs
}

-// ValidatePodSchedulingUpdate tests if an update to PodScheduling is valid.
-func ValidatePodSchedulingUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList {
-allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
-allErrs = append(allErrs, ValidatePodScheduling(resourceClaim)...)
+// ValidatePodSchedulingContextUpdate tests if an update to PodSchedulingContext is valid.
+func ValidatePodSchedulingContextUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList {
+allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata"))
+allErrs = append(allErrs, ValidatePodSchedulingContexts(schedulingCtx)...)
return allErrs
}

-// ValidatePodSchedulingStatusUpdate tests if an update to the status of a PodScheduling is valid.
-func ValidatePodSchedulingStatusUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList {
-allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
-allErrs = append(allErrs, validatePodSchedulingStatus(&resourceClaim.Status, field.NewPath("status"))...)
+// ValidatePodSchedulingContextStatusUpdate tests if an update to the status of a PodSchedulingContext is valid.
+func ValidatePodSchedulingContextStatusUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList {
+allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata"))
+allErrs = append(allErrs, validatePodSchedulingStatus(&schedulingCtx.Status, field.NewPath("status"))...)
return allErrs
}

-func validatePodSchedulingStatus(status *resource.PodSchedulingStatus, fldPath *field.Path) field.ErrorList {
+func validatePodSchedulingStatus(status *resource.PodSchedulingContextStatus, fldPath *field.Path) field.ErrorList {
return validatePodSchedulingClaims(status.ResourceClaims, fldPath.Child("claims"))
}
@@ -1,338 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
"fmt"
"testing"

"github.com/stretchr/testify/assert"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)

func testPodScheduling(name, namespace string, spec resource.PodSchedulingSpec) *resource.PodScheduling {
return &resource.PodScheduling{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: spec,
}
}

func TestValidatePodScheduling(t *testing.T) {
goodName := "foo"
goodNS := "ns"
goodPodSchedulingSpec := resource.PodSchedulingSpec{}
now := metav1.Now()
badName := "!@#$%^"
badValue := "spaces not allowed"

scenarios := map[string]struct {
scheduling *resource.PodScheduling
wantFailures field.ErrorList
}{
"good-scheduling": {
scheduling: testPodScheduling(goodName, goodNS, goodPodSchedulingSpec),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
scheduling: testPodScheduling("", goodNS, goodPodSchedulingSpec),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
scheduling: testPodScheduling(badName, goodNS, goodPodSchedulingSpec),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
scheduling: testPodScheduling(goodName, "", goodPodSchedulingSpec),
},
"generate-name": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.GenerateName = "pvc-"
return scheduling
}(),
},
"uid": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return scheduling
}(),
},
"resource-version": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.ResourceVersion = "1"
return scheduling
}(),
},
"generation": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Generation = 100
return scheduling
}(),
},
"creation-timestamp": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.CreationTimestamp = now
return scheduling
}(),
},
"deletion-grace-period-seconds": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.DeletionGracePeriodSeconds = pointer.Int64(10)
return scheduling
}(),
},
"owner-references": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return scheduling
}(),
},
"finalizers": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Finalizers = []string{
"example.com/foo",
}
return scheduling
}(),
},
"managed-fields": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return scheduling
}(),
},
"good-labels": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return scheduling
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Labels = map[string]string{
"hello-world": badValue,
}
return scheduling
}(),
},
"good-annotations": {
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Annotations = map[string]string{
"foo": "bar",
}
return scheduling
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
scheduling: func() *resource.PodScheduling {
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
scheduling.Annotations = map[string]string{
badName: "hello world",
}
return scheduling
}(),
},
}

for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidatePodScheduling(scenario.scheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

func TestValidatePodSchedulingUpdate(t *testing.T) {
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
badName := "!@#$%^"

scenarios := map[string]struct {
oldScheduling *resource.PodScheduling
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
},
"add-selected-node": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Spec.SelectedNode = "worker1"
return scheduling
},
},
"add-potential-nodes": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return scheduling
},
},
"invalid-potential-nodes-too-long": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return scheduling
},
},
"invalid-potential-nodes-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, badName)
return scheduling
},
},
}

for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
badName := "!@#$%^"

scenarios := map[string]struct {
oldScheduling *resource.PodScheduling
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
},
"add-claim-status": {
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return scheduling
},
},
"invalid-duplicated-claim-status": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
for i := 0; i < 2; i++ {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
)
}
return scheduling
},
},
"invalid-too-long-claim-status": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return scheduling
},
},
"invalid-node-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
badName,
)
return scheduling
},
},
}

for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
@@ -0,0 +1,342 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
"fmt"
"testing"

"github.com/stretchr/testify/assert"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/pointer"
)

func testPodSchedulingContexts(name, namespace string, spec resource.PodSchedulingContextSpec) *resource.PodSchedulingContext {
return &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: spec,
}
}

func TestValidatePodSchedulingContexts(t *testing.T) {
goodName := "foo"
goodNS := "ns"
goodPodSchedulingSpec := resource.PodSchedulingContextSpec{}
now := metav1.Now()
badName := "!@#$%^"
badValue := "spaces not allowed"

scenarios := map[string]struct {
schedulingCtx *resource.PodSchedulingContext
wantFailures field.ErrorList
}{
"good-schedulingCtx": {
schedulingCtx: testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
schedulingCtx: testPodSchedulingContexts("", goodNS, goodPodSchedulingSpec),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
schedulingCtx: testPodSchedulingContexts(badName, goodNS, goodPodSchedulingSpec),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
schedulingCtx: testPodSchedulingContexts(goodName, "", goodPodSchedulingSpec),
},
"generate-name": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.GenerateName = "pvc-"
return schedulingCtx
}(),
},
"uid": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return schedulingCtx
}(),
},
"resource-version": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.ResourceVersion = "1"
return schedulingCtx
}(),
},
"generation": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Generation = 100
return schedulingCtx
}(),
},
"creation-timestamp": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.CreationTimestamp = now
return schedulingCtx
}(),
},
"deletion-grace-period-seconds": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.DeletionGracePeriodSeconds = pointer.Int64(10)
return schedulingCtx
}(),
},
"owner-references": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return schedulingCtx
}(),
},
"finalizers": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Finalizers = []string{
"example.com/foo",
}
return schedulingCtx
}(),
},
"managed-fields": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return schedulingCtx
}(),
},
"good-labels": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return schedulingCtx
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Labels = map[string]string{
"hello-world": badValue,
}
return schedulingCtx
}(),
},
"good-annotations": {
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Annotations = map[string]string{
"foo": "bar",
}
return schedulingCtx
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
schedulingCtx: func() *resource.PodSchedulingContext {
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
schedulingCtx.Annotations = map[string]string{
badName: "hello world",
}
return schedulingCtx
}(),
},
}

for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidatePodSchedulingContexts(scenario.schedulingCtx)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

func TestValidatePodSchedulingUpdate(t *testing.T) {
validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{})
badName := "!@#$%^"

scenarios := map[string]struct {
oldScheduling *resource.PodSchedulingContext
update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
return schedulingCtx
},
},
"add-selected-node": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Spec.SelectedNode = "worker1"
return schedulingCtx
},
},
"add-potential-nodes": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return schedulingCtx
},
},
"invalid-potential-nodes-too-long": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
}
return schedulingCtx
},
},
"invalid-potential-nodes-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, badName)
return schedulingCtx
},
},
}

for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingContextUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{})
badName := "!@#$%^"

scenarios := map[string]struct {
oldScheduling *resource.PodSchedulingContext
update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
return schedulingCtx
},
},
"add-claim-status": {
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return schedulingCtx
},
},
"invalid-duplicated-claim-status": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
for i := 0; i < 2; i++ {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
)
}
return schedulingCtx
},
},
"invalid-too-long-claim-status": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
fmt.Sprintf("worker%d", i),
)
}
return schedulingCtx
},
},
"invalid-node-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
oldScheduling: validScheduling,
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
badName,
)
return schedulingCtx
},
},
}

for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldScheduling.ResourceVersion = "1"
errs := ValidatePodSchedulingContextStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
@@ -638,13 +638,13 @@ func AddHandlers(h printers.PrintHandler) {
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplate)
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplateList)

-podSchedulingColumnDefinitions := []metav1.TableColumnDefinition{
+podSchedulingCtxColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
-{Name: "SelectedNode", Type: "string", Description: resourcev1alpha2.PodSchedulingSpec{}.SwaggerDoc()["selectedNode"]},
+{Name: "SelectedNode", Type: "string", Description: resourcev1alpha2.PodSchedulingContextSpec{}.SwaggerDoc()["selectedNode"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
-_ = h.TableHandler(podSchedulingColumnDefinitions, printPodScheduling)
-_ = h.TableHandler(podSchedulingColumnDefinitions, printPodSchedulingList)
+_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContext)
+_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContextList)
}

// Pass ports=nil for all ports.
@@ -2870,7 +2870,7 @@ func printResourceClaimTemplateList(list *resource.ResourceClaimTemplateList, op
return rows, nil
}

-func printPodScheduling(obj *resource.PodScheduling, options printers.GenerateOptions) ([]metav1.TableRow, error) {
+func printPodSchedulingContext(obj *resource.PodSchedulingContext, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
@@ -2879,10 +2879,10 @@ func printPodScheduling(obj *resource.PodScheduling, options printers.GenerateOp
return []metav1.TableRow{row}, nil
}

-func printPodSchedulingList(list *resource.PodSchedulingList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
+func printPodSchedulingContextList(list *resource.PodSchedulingContextList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
-r, err := printPodScheduling(&list.Items[i], options)
+r, err := printPodSchedulingContext(&list.Items[i], options)
if err != nil {
return nil, err
}
@@ -28,54 +28,54 @@ import (
"k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
-"k8s.io/kubernetes/pkg/registry/resource/podscheduling"
+"k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

-// REST implements a RESTStorage for PodSchedulings.
+// REST implements a RESTStorage for PodSchedulingContext.
type REST struct {
*genericregistry.Store
}

-// NewREST returns a RESTStorage object that will work against PodSchedulings.
+// NewREST returns a RESTStorage object that will work against PodSchedulingContext.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) {
store := &genericregistry.Store{
-NewFunc: func() runtime.Object { return &resource.PodScheduling{} },
-NewListFunc: func() runtime.Object { return &resource.PodSchedulingList{} },
-PredicateFunc: podscheduling.Match,
-DefaultQualifiedResource: resource.Resource("podschedulings"),
-SingularQualifiedResource: resource.Resource("podscheduling"),
+NewFunc: func() runtime.Object { return &resource.PodSchedulingContext{} },
+NewListFunc: func() runtime.Object { return &resource.PodSchedulingContextList{} },
+PredicateFunc: podschedulingcontext.Match,
+DefaultQualifiedResource: resource.Resource("podschedulingcontexts"),
+SingularQualifiedResource: resource.Resource("podschedulingcontext"),

-CreateStrategy: podscheduling.Strategy,
-UpdateStrategy: podscheduling.Strategy,
-DeleteStrategy: podscheduling.Strategy,
+CreateStrategy: podschedulingcontext.Strategy,
+UpdateStrategy: podschedulingcontext.Strategy,
+DeleteStrategy: podschedulingcontext.Strategy,
ReturnDeletedObject: true,
-ResetFieldsStrategy: podscheduling.Strategy,
+ResetFieldsStrategy: podschedulingcontext.Strategy,

TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
}
-options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podscheduling.GetAttrs}
+options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podschedulingcontext.GetAttrs}
if err := store.CompleteWithOptions(options); err != nil {
return nil, nil, err
}

statusStore := *store
-statusStore.UpdateStrategy = podscheduling.StatusStrategy
-statusStore.ResetFieldsStrategy = podscheduling.StatusStrategy
+statusStore.UpdateStrategy = podschedulingcontext.StatusStrategy
+statusStore.ResetFieldsStrategy = podschedulingcontext.StatusStrategy

rest := &REST{store}

return rest, &StatusREST{store: &statusStore}, nil
}

-// StatusREST implements the REST endpoint for changing the status of a PodScheduling.
+// StatusREST implements the REST endpoint for changing the status of a PodSchedulingContext.
type StatusREST struct {
store *genericregistry.Store
}

-// New creates a new PodScheduling object.
+// New creates a new PodSchedulingContext object.
func (r *StatusREST) New() runtime.Object {
-return &resource.PodScheduling{}
+return &resource.PodSchedulingContext{}
}

func (r *StatusREST) Destroy() {
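Because `DefaultQualifiedResource` changes from `podschedulings` to `podschedulingcontexts`, the objects are served under a new URL path and the generated clients gain matching accessors. A hedged client-go sketch of listing them follows; it assumes the generated typed client exposes `ResourceV1alpha2().PodSchedulingContexts(...)` (an informer accessor of the same name appears in the scheduler hunk further down), and the kubeconfig handling is illustrative.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the default kubeconfig; error handling kept minimal for brevity.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Roughly corresponds to:
	// GET /apis/resource.k8s.io/v1alpha2/namespaces/default/podschedulingcontexts
	list, err := clientset.ResourceV1alpha2().PodSchedulingContexts("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, psc := range list.Items {
		fmt.Println(psc.Name, psc.Spec.SelectedNode)
	}
}
```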
@@ -41,7 +41,7 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer)
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1,
-ResourcePrefix: "podschedulings",
+ResourcePrefix: "podschedulingcontexts",
}
podSchedulingStorage, statusStorage, err := NewREST(restOptions)
if err != nil {
@@ -50,18 +50,18 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer)
return podSchedulingStorage, statusStorage, server
}

-func validNewPodScheduling(name, ns string) *resource.PodScheduling {
-scheduling := &resource.PodScheduling{
+func validNewPodSchedulingContexts(name, ns string) *resource.PodSchedulingContext {
+schedulingCtx := &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
-Spec: resource.PodSchedulingSpec{
+Spec: resource.PodSchedulingContextSpec{
SelectedNode: "worker",
},
-Status: resource.PodSchedulingStatus{},
+Status: resource.PodSchedulingContextStatus{},
}
-return scheduling
+return schedulingCtx
}

func TestCreate(t *testing.T) {
@@ -69,13 +69,13 @@ func TestCreate(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
-scheduling := validNewPodScheduling("foo", metav1.NamespaceDefault)
-scheduling.ObjectMeta = metav1.ObjectMeta{}
+schedulingCtx := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)
+schedulingCtx.ObjectMeta = metav1.ObjectMeta{}
test.TestCreate(
// valid
-scheduling,
+schedulingCtx,
// invalid
-&resource.PodScheduling{
+&resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
},
)
@@ -88,10 +88,10 @@ func TestUpdate(t *testing.T) {
test := genericregistrytest.New(t, storage.Store)
test.TestUpdate(
// valid
-validNewPodScheduling("foo", metav1.NamespaceDefault),
+validNewPodSchedulingContexts("foo", metav1.NamespaceDefault),
// updateFunc
func(obj runtime.Object) runtime.Object {
-object := obj.(*resource.PodScheduling)
+object := obj.(*resource.PodSchedulingContext)
if object.Labels == nil {
object.Labels = map[string]string{}
}
@@ -106,7 +106,7 @@ func TestDelete(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
-test.TestDelete(validNewPodScheduling("foo", metav1.NamespaceDefault))
+test.TestDelete(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
}

func TestGet(t *testing.T) {
@@ -114,7 +114,7 @@ func TestGet(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
-test.TestGet(validNewPodScheduling("foo", metav1.NamespaceDefault))
+test.TestGet(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
}

func TestList(t *testing.T) {
@@ -122,7 +122,7 @@ func TestList(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
-test.TestList(validNewPodScheduling("foo", metav1.NamespaceDefault))
+test.TestList(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
}

func TestWatch(t *testing.T) {
@@ -131,7 +131,7 @@ func TestWatch(t *testing.T) {
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestWatch(
-validNewPodScheduling("foo", metav1.NamespaceDefault),
+validNewPodSchedulingContexts("foo", metav1.NamespaceDefault),
// matching labels
[]labels.Set{},
// not matching labels
@@ -156,19 +156,19 @@ func TestUpdateStatus(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()

key, _ := storage.KeyFunc(ctx, "foo")
-schedulingStart := validNewPodScheduling("foo", metav1.NamespaceDefault)
+schedulingStart := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)
err := storage.Storage.Create(ctx, key, schedulingStart, nil, 0, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

-scheduling := schedulingStart.DeepCopy()
-scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
+schedulingCtx := schedulingStart.DeepCopy()
+schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
-_, _, err = statusStorage.Update(ctx, scheduling.Name, rest.DefaultUpdatedObjectInfo(scheduling), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
+_, _, err = statusStorage.Update(ctx, schedulingCtx.Name, rest.DefaultUpdatedObjectInfo(schedulingCtx), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -176,9 +176,9 @@ func TestUpdateStatus(t *testing.T) {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
-schedulingOut := obj.(*resource.PodScheduling)
+schedulingOut := obj.(*resource.PodSchedulingContext)
// only compare relevant changes b/c of difference in metadata
-if !apiequality.Semantic.DeepEqual(scheduling.Status, schedulingOut.Status) {
-t.Errorf("unexpected object: %s", diff.ObjectDiff(scheduling.Status, schedulingOut.Status))
+if !apiequality.Semantic.DeepEqual(schedulingCtx.Status, schedulingOut.Status) {
+t.Errorf("unexpected object: %s", diff.ObjectDiff(schedulingCtx.Status, schedulingOut.Status))
}
}
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package podscheduling
+package podschedulingcontext

import (
"context"
@@ -33,7 +33,7 @@ import (
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

-// podSchedulingStrategy implements behavior for PodScheduling objects
+// podSchedulingStrategy implements behavior for PodSchedulingContext objects
type podSchedulingStrategy struct {
runtime.ObjectTyper
names.NameGenerator
@@ -48,7 +48,7 @@ func (podSchedulingStrategy) NamespaceScoped() bool {
}

// GetResetFields returns the set of fields that get reset by the strategy and
-// should not be modified by the user. For a new PodScheduling that is the
+// should not be modified by the user. For a new PodSchedulingContext that is the
// status.
func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
fields := map[fieldpath.APIVersion]*fieldpath.Set{
@@ -61,14 +61,14 @@ func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpat
}

func (podSchedulingStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
-scheduling := obj.(*resource.PodScheduling)
+scheduling := obj.(*resource.PodSchedulingContext)
// Status must not be set by user on create.
-scheduling.Status = resource.PodSchedulingStatus{}
+scheduling.Status = resource.PodSchedulingContextStatus{}
}

func (podSchedulingStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
-scheduling := obj.(*resource.PodScheduling)
-return validation.ValidatePodScheduling(scheduling)
+scheduling := obj.(*resource.PodSchedulingContext)
+return validation.ValidatePodSchedulingContexts(scheduling)
}

func (podSchedulingStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
@@ -83,16 +83,16 @@ func (podSchedulingStrategy) AllowCreateOnUpdate() bool {
}

func (podSchedulingStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
-newScheduling := obj.(*resource.PodScheduling)
-oldScheduling := old.(*resource.PodScheduling)
+newScheduling := obj.(*resource.PodSchedulingContext)
+oldScheduling := old.(*resource.PodSchedulingContext)
newScheduling.Status = oldScheduling.Status
}

func (podSchedulingStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
-newScheduling := obj.(*resource.PodScheduling)
-oldScheduling := old.(*resource.PodScheduling)
-errorList := validation.ValidatePodScheduling(newScheduling)
-return append(errorList, validation.ValidatePodSchedulingUpdate(newScheduling, oldScheduling)...)
+newScheduling := obj.(*resource.PodSchedulingContext)
+oldScheduling := old.(*resource.PodSchedulingContext)
+errorList := validation.ValidatePodSchedulingContexts(newScheduling)
+return append(errorList, validation.ValidatePodSchedulingContextUpdate(newScheduling, oldScheduling)...)
}

func (podSchedulingStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
@@ -122,15 +122,15 @@ func (podSchedulingStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fi
}

func (podSchedulingStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
-newScheduling := obj.(*resource.PodScheduling)
-oldScheduling := old.(*resource.PodScheduling)
+newScheduling := obj.(*resource.PodSchedulingContext)
+oldScheduling := old.(*resource.PodSchedulingContext)
newScheduling.Spec = oldScheduling.Spec
}

func (podSchedulingStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
-newScheduling := obj.(*resource.PodScheduling)
-oldScheduling := old.(*resource.PodScheduling)
-return validation.ValidatePodSchedulingStatusUpdate(newScheduling, oldScheduling)
+newScheduling := obj.(*resource.PodSchedulingContext)
+oldScheduling := old.(*resource.PodSchedulingContext)
+return validation.ValidatePodSchedulingContextStatusUpdate(newScheduling, oldScheduling)
}

// WarningsOnUpdate returns warnings for the given update.
@@ -149,15 +149,15 @@ func Match(label labels.Selector, field fields.Selector) storage.SelectionPredic

// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
-scheduling, ok := obj.(*resource.PodScheduling)
+scheduling, ok := obj.(*resource.PodSchedulingContext)
if !ok {
-return nil, nil, errors.New("not a PodScheduling")
+return nil, nil, errors.New("not a PodSchedulingContext")
}
return labels.Set(scheduling.Labels), toSelectableFields(scheduling), nil
}

// toSelectableFields returns a field set that represents the object
-func toSelectableFields(scheduling *resource.PodScheduling) fields.Set {
+func toSelectableFields(scheduling *resource.PodSchedulingContext) fields.Set {
fields := generic.ObjectMetaFieldsSet(&scheduling.ObjectMeta, true)
return fields
}
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package podscheduling
+package podschedulingcontext

import (
"testing"
@@ -24,31 +24,31 @@ import (
"k8s.io/kubernetes/pkg/apis/resource"
)

-var podScheduling = &resource.PodScheduling{
+var schedulingCtx = &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: "valid-pod",
Namespace: "default",
},
-Spec: resource.PodSchedulingSpec{
+Spec: resource.PodSchedulingContextSpec{
SelectedNode: "worker",
},
}

func TestPodSchedulingStrategy(t *testing.T) {
if !Strategy.NamespaceScoped() {
-t.Errorf("PodScheduling must be namespace scoped")
+t.Errorf("PodSchedulingContext must be namespace scoped")
}
if Strategy.AllowCreateOnUpdate() {
-t.Errorf("PodScheduling should not allow create on update")
+t.Errorf("PodSchedulingContext should not allow create on update")
}
}

func TestPodSchedulingStrategyCreate(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
-podScheduling := podScheduling.DeepCopy()
+schedulingCtx := schedulingCtx.DeepCopy()

-Strategy.PrepareForCreate(ctx, podScheduling)
-errs := Strategy.Validate(ctx, podScheduling)
+Strategy.PrepareForCreate(ctx, schedulingCtx)
+errs := Strategy.Validate(ctx, schedulingCtx)
if len(errs) != 0 {
t.Errorf("unexpected error validating for create %v", errs)
}
@@ -57,12 +57,12 @@ func TestPodSchedulingStrategyCreate(t *testing.T) {
func TestPodSchedulingStrategyUpdate(t *testing.T) {
t.Run("no-changes-okay", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
-podScheduling := podScheduling.DeepCopy()
-newPodScheduling := podScheduling.DeepCopy()
-newPodScheduling.ResourceVersion = "4"
+schedulingCtx := schedulingCtx.DeepCopy()
+newSchedulingCtx := schedulingCtx.DeepCopy()
+newSchedulingCtx.ResourceVersion = "4"

-Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling)
-errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling)
+Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx)
+errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx)
if len(errs) != 0 {
t.Errorf("unexpected validation errors: %v", errs)
}
@@ -70,13 +70,13 @@ func TestPodSchedulingStrategyUpdate(t *testing.T) {

t.Run("name-change-not-allowed", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
-podScheduling := podScheduling.DeepCopy()
-newPodScheduling := podScheduling.DeepCopy()
-newPodScheduling.Name = "valid-claim-2"
-newPodScheduling.ResourceVersion = "4"
+schedulingCtx := schedulingCtx.DeepCopy()
+newSchedulingCtx := schedulingCtx.DeepCopy()
+newSchedulingCtx.Name = "valid-claim-2"
+newSchedulingCtx.ResourceVersion = "4"

-Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling)
-errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling)
+Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx)
+errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx)
if len(errs) == 0 {
t.Errorf("expected a validation error")
}
@ -24,7 +24,7 @@ import (
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
podschedulingstore "k8s.io/kubernetes/pkg/registry/resource/podscheduling/storage"
podschedulingcontextsstore "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext/storage"
resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage"
resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage"
resourceclassstore "k8s.io/kubernetes/pkg/registry/resource/resourceclass/storage"
@ -74,8 +74,8 @@ func (p RESTStorageProvider) v1alpha2Storage(apiResourceConfigSource serverstora
storage[resource] = resourceClaimTemplateStorage
}

if resource := "podschedulings"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingstore.NewREST(restOptionsGetter)
if resource := "podschedulingcontexts"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingcontextsstore.NewREST(restOptionsGetter)
if err != nil {
return nil, err
}
@ -378,10 +378,10 @@ func addAllEventHandlers(
informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PersistentVolumeClaim, "Pvc"),
)
case framework.PodScheduling:
case framework.PodSchedulingContext:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
_, _ = informerFactory.Resource().V1alpha2().PodSchedulings().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PodScheduling, "PodScheduling"),
_, _ = informerFactory.Resource().V1alpha2().PodSchedulingContexts().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PodSchedulingContext, "PodSchedulingContext"),
)
}
case framework.ResourceClaim:
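For reference, the renamed informer is reachable outside the scheduler through the generated shared informer factory as well. A minimal sketch, assuming an already constructed clientset and a ten-minute resync period (both placeholders, not part of this change); the log-only callbacks stand in for buildEvtResHandler:

package example

import (
	"fmt"
	"time"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchPodSchedulingContexts registers a handler for PodSchedulingContext
// add/update events, mirroring what addAllEventHandlers does above.
func watchPodSchedulingContexts(clientset kubernetes.Interface) cache.SharedIndexInformer {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	informer := factory.Resource().V1alpha2().PodSchedulingContexts().Informer()
	_, _ = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if sc, ok := obj.(*resourcev1alpha2.PodSchedulingContext); ok {
				fmt.Println("PodSchedulingContext added:", sc.Namespace+"/"+sc.Name)
			}
		},
		UpdateFunc: func(_, newObj interface{}) {
			if sc, ok := newObj.(*resourcev1alpha2.PodSchedulingContext); ok {
				fmt.Println("PodSchedulingContext updated:", sc.Namespace+"/"+sc.Name)
			}
		},
	})
	return informer
}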
@ -74,14 +74,14 @@ type stateData struct {
// protected by the mutex. Used by PostFilter.
unavailableClaims sets.Int

// A pointer to the PodScheduling object for the pod, if one exists.
// A pointer to the PodSchedulingContext object for the pod, if one exists.
// Gets set on demand.
//
// Conceptually, this object belongs into the scheduler framework
// where it might get shared by different plugins. But in practice,
// it is currently only used by dynamic provisioning and thus
// managed entirely here.
podScheduling *resourcev1alpha2.PodScheduling
schedulingCtx *resourcev1alpha2.PodSchedulingContext

// podSchedulingDirty is true if the current copy was locally modified.
podSchedulingDirty bool
@ -112,23 +112,23 @@ func (d *stateData) updateClaimStatus(ctx context.Context, clientset kubernetes.
return nil
}

// initializePodScheduling can be called concurrently. It returns an existing PodScheduling
// initializePodSchedulingContexts can be called concurrently. It returns an existing PodSchedulingContext
// object if there is one already, retrieves one if not, or as a last resort creates
// one from scratch.
func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, podSchedulingLister resourcev1alpha2listers.PodSchedulingLister) (*resourcev1alpha2.PodScheduling, error) {
// TODO (#113701): check if this mutex locking can be avoided by calling initializePodScheduling during PreFilter.
func (d *stateData) initializePodSchedulingContexts(ctx context.Context, pod *v1.Pod, podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister) (*resourcev1alpha2.PodSchedulingContext, error) {
// TODO (#113701): check if this mutex locking can be avoided by calling initializePodSchedulingContexts during PreFilter.
d.mutex.Lock()
defer d.mutex.Unlock()

if d.podScheduling != nil {
return d.podScheduling, nil
if d.schedulingCtx != nil {
return d.schedulingCtx, nil
}

podScheduling, err := podSchedulingLister.PodSchedulings(pod.Namespace).Get(pod.Name)
schedulingCtx, err := podSchedulingContextLister.PodSchedulingContexts(pod.Namespace).Get(pod.Name)
switch {
case apierrors.IsNotFound(err):
controller := true
podScheduling = &resourcev1alpha2.PodScheduling{
schedulingCtx = &resourcev1alpha2.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
@ -148,56 +148,56 @@ func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, po
return nil, err
default:
// We have an object, but it might be obsolete.
if !metav1.IsControlledBy(podScheduling, pod) {
return nil, fmt.Errorf("PodScheduling object with UID %s is not owned by Pod %s/%s", podScheduling.UID, pod.Namespace, pod.Name)
if !metav1.IsControlledBy(schedulingCtx, pod) {
return nil, fmt.Errorf("PodSchedulingContext object with UID %s is not owned by Pod %s/%s", schedulingCtx.UID, pod.Namespace, pod.Name)
}
}
d.podScheduling = podScheduling
return podScheduling, err
d.schedulingCtx = schedulingCtx
return schedulingCtx, err
}
// publishPodScheduling creates or updates the PodScheduling object.
func (d *stateData) publishPodScheduling(ctx context.Context, clientset kubernetes.Interface, podScheduling *resourcev1alpha2.PodScheduling) error {
// publishPodSchedulingContexts creates or updates the PodSchedulingContext object.
func (d *stateData) publishPodSchedulingContexts(ctx context.Context, clientset kubernetes.Interface, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
d.mutex.Lock()
defer d.mutex.Unlock()

var err error
logger := klog.FromContext(ctx)
msg := "Updating PodScheduling"
if podScheduling.UID == "" {
msg = "Creating PodScheduling"
msg := "Updating PodSchedulingContext"
if schedulingCtx.UID == "" {
msg = "Creating PodSchedulingContext"
}
if loggerV := logger.V(6); loggerV.Enabled() {
// At a high enough log level, dump the entire object.
loggerV.Info(msg, "podschedulingDump", podScheduling)
loggerV.Info(msg, "podSchedulingCtxDump", schedulingCtx)
} else {
logger.V(5).Info(msg, "podscheduling", klog.KObj(podScheduling))
logger.V(5).Info(msg, "podSchedulingCtx", klog.KObj(schedulingCtx))
}
if podScheduling.UID == "" {
podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Create(ctx, podScheduling, metav1.CreateOptions{})
if schedulingCtx.UID == "" {
schedulingCtx, err = clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).Create(ctx, schedulingCtx, metav1.CreateOptions{})
} else {
// TODO (#113700): patch here to avoid racing with drivers which update the status.
podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Update(ctx, podScheduling, metav1.UpdateOptions{})
schedulingCtx, err = clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).Update(ctx, schedulingCtx, metav1.UpdateOptions{})
}
if err != nil {
return err
}
d.podScheduling = podScheduling
d.schedulingCtx = schedulingCtx
d.podSchedulingDirty = false
return nil
}

// storePodScheduling replaces the pod scheduling object in the state.
func (d *stateData) storePodScheduling(podScheduling *resourcev1alpha2.PodScheduling) {
// storePodSchedulingContexts replaces the PodSchedulingContext object in the state.
func (d *stateData) storePodSchedulingContexts(schedulingCtx *resourcev1alpha2.PodSchedulingContext) {
d.mutex.Lock()
defer d.mutex.Unlock()

d.podScheduling = podScheduling
d.schedulingCtx = schedulingCtx
d.podSchedulingDirty = true
}
func statusForClaim(podScheduling *resourcev1alpha2.PodScheduling, podClaimName string) *resourcev1alpha2.ResourceClaimSchedulingStatus {
for _, status := range podScheduling.Status.ResourceClaims {
func statusForClaim(schedulingCtx *resourcev1alpha2.PodSchedulingContext, podClaimName string) *resourcev1alpha2.ResourceClaimSchedulingStatus {
for _, status := range schedulingCtx.Status.ResourceClaims {
if status.Name == podClaimName {
return &status
}
@ -211,7 +211,7 @@ type dynamicResources struct {
clientset kubernetes.Interface
claimLister resourcev1alpha2listers.ResourceClaimLister
classLister resourcev1alpha2listers.ResourceClassLister
podSchedulingLister resourcev1alpha2listers.PodSchedulingLister
podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister
}

// New initializes a new plugin and returns it.
@ -226,7 +226,7 @@ func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram
clientset: fh.ClientSet(),
claimLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Lister(),
classLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClasses().Lister(),
podSchedulingLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulings().Lister(),
podSchedulingContextLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulingContexts().Lister(),
}, nil
}

@ -257,7 +257,7 @@ func (pl *dynamicResources) EventsToRegister() []framework.ClusterEvent {
// may be schedulable.
// TODO (#113702): can we change this so that such an event does not trigger *all* pods?
// Yes: https://github.com/kubernetes/kubernetes/blob/abcbaed0784baf5ed2382aae9705a8918f2daa18/pkg/scheduler/eventhandlers.go#L70
{Resource: framework.PodScheduling, ActionType: framework.Add | framework.Update},
{Resource: framework.PodSchedulingContext, ActionType: framework.Add | framework.Update},
// A resource might depend on node labels for topology filtering.
// A new or updated node may make pods schedulable.
{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel},
@ -436,11 +436,11 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState
}

// Now we need information from drivers.
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister)
schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil {
return statusError(logger, err)
}
status := statusForClaim(podScheduling, pod.Spec.ResourceClaims[index].Name)
status := statusForClaim(schedulingCtx, pod.Spec.ResourceClaims[index].Name)
if status != nil {
for _, unsuitableNode := range status.UnsuitableNodes {
if node.Name == unsuitableNode {
@ -530,7 +530,7 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
}

logger := klog.FromContext(ctx)
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister)
schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil {
return statusError(logger, err)
}
@ -540,22 +540,22 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
pending = true
}
}
if pending && !haveAllNodes(podScheduling.Spec.PotentialNodes, nodes) {
if pending && !haveAllNodes(schedulingCtx.Spec.PotentialNodes, nodes) {
// Remember the potential nodes. The object will get created or
// updated in Reserve. This is both an optimization and
// covers the case that PreScore doesn't get called when there
// is only a single node.
logger.V(5).Info("remembering potential nodes", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes))
podScheduling = podScheduling.DeepCopy()
schedulingCtx = schedulingCtx.DeepCopy()
numNodes := len(nodes)
if numNodes > resourcev1alpha2.PodSchedulingNodeListMaxSize {
numNodes = resourcev1alpha2.PodSchedulingNodeListMaxSize
}
podScheduling.Spec.PotentialNodes = make([]string, 0, numNodes)
schedulingCtx.Spec.PotentialNodes = make([]string, 0, numNodes)
if numNodes == len(nodes) {
// Copy all node names.
for _, node := range nodes {
podScheduling.Spec.PotentialNodes = append(podScheduling.Spec.PotentialNodes, node.Name)
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, node.Name)
}
} else {
// Select a random subset of the nodes to comply with
@ -567,14 +567,14 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
nodeNames[node.Name] = struct{}{}
}
for nodeName := range nodeNames {
if len(podScheduling.Spec.PotentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize {
if len(schedulingCtx.Spec.PotentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize {
break
}
podScheduling.Spec.PotentialNodes = append(podScheduling.Spec.PotentialNodes, nodeName)
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, nodeName)
}
}
sort.Strings(podScheduling.Spec.PotentialNodes)
state.storePodScheduling(podScheduling)
sort.Strings(schedulingCtx.Spec.PotentialNodes)
state.storePodSchedulingContexts(schedulingCtx)
}
logger.V(5).Info("all potential nodes already set", "pod", klog.KObj(pod), "potentialnodes", nodes)
return nil
@ -614,7 +614,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
numDelayedAllocationPending := 0
numClaimsWithStatusInfo := 0
logger := klog.FromContext(ctx)
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister)
schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil {
return statusError(logger, err)
}
@ -639,7 +639,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
return statusError(logger, err)
}
// If we get here, we know that reserving the claim for
// the pod worked and we can proceed with scheduling
// the pod worked and we can proceed with scheduling
// it.
} else {
// Must be delayed allocation.
@ -647,7 +647,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat

// Did the driver provide information that steered node
// selection towards a node that it can support?
if statusForClaim(podScheduling, pod.Spec.ResourceClaims[index].Name) != nil {
if statusForClaim(schedulingCtx, pod.Spec.ResourceClaims[index].Name) != nil {
numClaimsWithStatusInfo++
}
}
@ -659,13 +659,13 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
}

podSchedulingDirty := state.podSchedulingDirty
if len(podScheduling.Spec.PotentialNodes) == 0 {
if len(schedulingCtx.Spec.PotentialNodes) == 0 {
// PreScore was not called, probably because there was
// only one candidate. We need to ask whether that
// node is suitable, otherwise the scheduler will pick
// it forever even when it cannot satisfy the claim.
podScheduling = podScheduling.DeepCopy()
podScheduling.Spec.PotentialNodes = []string{nodeName}
schedulingCtx = schedulingCtx.DeepCopy()
schedulingCtx.Spec.PotentialNodes = []string{nodeName}
logger.V(5).Info("asking for information about single potential node", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
podSchedulingDirty = true
}
@ -675,16 +675,16 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
// the driver yet. Otherwise we wait for information before blindly
// making a decision that might have to be reversed later.
if numDelayedAllocationPending == 1 || numClaimsWithStatusInfo == numDelayedAllocationPending {
podScheduling = podScheduling.DeepCopy()
schedulingCtx = schedulingCtx.DeepCopy()
// TODO: can we increase the chance that the scheduler picks
// the same node as before when allocation is on-going,
// assuming that that node still fits the pod? Picking a
// different node may lead to some claims being allocated for
// one node and others for another, which then would have to be
// resolved with deallocation.
podScheduling.Spec.SelectedNode = nodeName
schedulingCtx.Spec.SelectedNode = nodeName
logger.V(5).Info("start allocation", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
if err := state.publishPodScheduling(ctx, pl.clientset, podScheduling); err != nil {
if err := state.publishPodSchedulingContexts(ctx, pl.clientset, schedulingCtx); err != nil {
return statusError(logger, err)
}
return statusUnschedulable(logger, "waiting for resource driver to allocate resource", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
@ -692,14 +692,14 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat

// May have been modified earlier in PreScore or above.
if podSchedulingDirty {
if err := state.publishPodScheduling(ctx, pl.clientset, podScheduling); err != nil {
if err := state.publishPodSchedulingContexts(ctx, pl.clientset, schedulingCtx); err != nil {
return statusError(logger, err)
}
}

// More than one pending claim and not enough information about all of them.
//
// TODO: can or should we ensure that scheduling gets aborted while
// TODO: can or should we ensure that scheduling gets aborted while
// waiting for resources *before* triggering delayed volume
// provisioning? On the one hand, volume provisioning is currently
// irreversible, so it better should come last. On the other hand,
@ -737,7 +737,7 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
claim.Status.ReservedFor = reservedFor
logger.V(5).Info("unreserve", "resourceclaim", klog.KObj(claim))
if err := state.updateClaimStatus(ctx, pl.clientset, index, claim); err != nil {
// We will get here again when pod scheduling
// We will get here again when pod scheduling
// is retried.
logger.Error(err, "unreserve", "resourceclaim", klog.KObj(claim))
}
@ -746,7 +746,7 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
}
// PostBind is called after a pod is successfully bound to a node. Now we are
// sure that a PodScheduling object, if it exists, is definitely not going to
// sure that a PodSchedulingContext object, if it exists, is definitely not going to
// be needed anymore and can delete it. This is a one-shot thing, there won't
// be any retries. This is okay because it should usually work and in those
// cases where it doesn't, the garbage collector will eventually clean up.
@ -762,19 +762,19 @@ func (pl *dynamicResources) PostBind(ctx context.Context, cs *framework.CycleSta
return
}

// We cannot know for sure whether the PodScheduling object exists. We
// might have created it in the previous pod scheduling cycle and not
// We cannot know for sure whether the PodSchedulingContext object exists. We
// might have created it in the previous pod scheduling cycle and not
// have it in our informer cache yet. Let's try to delete, just to be
// on the safe side.
logger := klog.FromContext(ctx)
err = pl.clientset.ResourceV1alpha2().PodSchedulings(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
err = pl.clientset.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
switch {
case apierrors.IsNotFound(err):
logger.V(5).Info("no PodScheduling object to delete")
logger.V(5).Info("no PodSchedulingContext object to delete")
case err != nil:
logger.Error(err, "delete PodScheduling")
logger.Error(err, "delete PodSchedulingContext")
default:
logger.V(5).Info("PodScheduling object deleted")
logger.V(5).Info("PodSchedulingContext object deleted")
}
}
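Condensed, the client calls the plugin makes above amount to: create or update the object while scheduling is in flight, delete it in PostBind once the pod is bound. A sketch under those assumptions (the clientset and the deleteIt flag are illustrative, not part of the plugin API):

package example

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// publishOrDelete either persists a PodSchedulingContext or removes it after binding.
func publishOrDelete(ctx context.Context, client kubernetes.Interface, sc *resourcev1alpha2.PodSchedulingContext, deleteIt bool) error {
	api := client.ResourceV1alpha2().PodSchedulingContexts(sc.Namespace)
	if deleteIt {
		// PostBind: the object is not needed once the pod is bound.
		return api.Delete(ctx, sc.Name, metav1.DeleteOptions{})
	}
	var err error
	if sc.UID == "" {
		// Never persisted before: create it.
		_, err = api.Create(ctx, sc, metav1.CreateOptions{})
	} else {
		// Already exists: update the spec (potential/selected nodes).
		_, err = api.Update(ctx, sc, metav1.UpdateOptions{})
	}
	return err
}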
@ -125,16 +125,16 @@ var (
ResourceClassName(className).
Obj()

scheduling = st.MakePodScheduling().Name(podName).Namespace(namespace).
scheduling = st.MakePodSchedulingContexts().Name(podName).Namespace(namespace).
OwnerReference(podName, podUID, podKind).
Obj()
schedulingPotential = st.FromPodScheduling(scheduling).
schedulingPotential = st.FromPodSchedulingContexts(scheduling).
PotentialNodes(workerNode.Name).
Obj()
schedulingSelectedPotential = st.FromPodScheduling(schedulingPotential).
schedulingSelectedPotential = st.FromPodSchedulingContexts(schedulingPotential).
SelectedNode(workerNode.Name).
Obj()
schedulingInfo = st.FromPodScheduling(schedulingPotential).
schedulingInfo = st.FromPodSchedulingContexts(schedulingPotential).
ResourceClaims(resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName},
resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName2}).
Obj()
@ -160,7 +160,7 @@ type result struct {
// functions will get called for all objects of that type. If they need to
// make changes only to a particular instance, then it must check the name.
type change struct {
scheduling func(*resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling
scheduling func(*resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext
claim func(*resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim
}
type perNodeResult map[string]result
@ -203,7 +203,7 @@ func TestPlugin(t *testing.T) {
pod *v1.Pod
claims []*resourcev1alpha2.ResourceClaim
classes []*resourcev1alpha2.ResourceClass
schedulings []*resourcev1alpha2.PodScheduling
schedulings []*resourcev1alpha2.PodSchedulingContext

prepare prepare
want want
@ -269,7 +269,7 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-select-immediately": {
// Create the PodScheduling object, ask for information
// Create the PodSchedulingContext object, ask for information
// and select a node.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
@ -282,7 +282,7 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-ask": {
// Create the PodScheduling object, ask for
// Create the PodSchedulingContext object, ask for
// information, but do not select a node because
// there are multiple claims.
pod: podWithTwoClaimNames,
@ -296,18 +296,18 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-finish": {
// Use the populated PodScheduling object to select a
// Use the populated PodSchedulingContext object to select a
// node.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo},
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
reserve: result{
status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`),
changes: change{
scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
return st.FromPodScheduling(in).
scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
return st.FromPodSchedulingContexts(in).
SelectedNode(workerNode.Name).
Obj()
},
@ -316,19 +316,19 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-finish-concurrent-label-update": {
// Use the populated PodScheduling object to select a
// Use the populated PodSchedulingContext object to select a
// node.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo},
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
prepare: prepare{
reserve: change{
scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
// This does not actually conflict with setting the
// selected node, but because the plugin is not using
// patching yet, Update nonetheless fails.
return st.FromPodScheduling(in).
return st.FromPodSchedulingContexts(in).
Label("hello", "world").
Obj()
},
@ -341,10 +341,10 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-completed": {
// Remove PodScheduling object once the pod is scheduled.
// Remove PodSchedulingContext object once the pod is scheduled.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo},
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
reserve: result{
@ -366,7 +366,7 @@ func TestPlugin(t *testing.T) {
pod: otherPodWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{inUseClaim},
classes: []*resourcev1alpha2.ResourceClass{},
schedulings: []*resourcev1alpha2.PodScheduling{},
schedulings: []*resourcev1alpha2.PodSchedulingContext{},
prepare: prepare{},
want: want{
prefilter: result{
@ -591,7 +591,7 @@ func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) {
for _, claim := range claims.Items {
objects = append(objects, &claim)
}
schedulings, err := tc.client.ResourceV1alpha2().PodSchedulings("").List(tc.ctx, metav1.ListOptions{})
schedulings, err := tc.client.ResourceV1alpha2().PodSchedulingContexts("").List(tc.ctx, metav1.ListOptions{})
require.NoError(t, err, "list pod scheduling")
for _, scheduling := range schedulings.Items {
objects = append(objects, &scheduling)
@ -615,8 +615,8 @@ func (tc *testContext) updateAPIServer(t *testing.T, objects []metav1.Object, up
t.Fatalf("unexpected error during prepare update: %v", err)
}
modified[i] = obj
case *resourcev1alpha2.PodScheduling:
obj, err := tc.client.ResourceV1alpha2().PodSchedulings(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
case *resourcev1alpha2.PodSchedulingContext:
obj, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error during prepare update: %v", err)
}
@ -650,7 +650,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje
if updates.claim != nil {
obj = updates.claim(in)
}
case *resourcev1alpha2.PodScheduling:
case *resourcev1alpha2.PodSchedulingContext:
if updates.scheduling != nil {
obj = updates.scheduling(in)
}
@ -661,7 +661,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje
return updated
}

func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodScheduling) (result *testContext) {
func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodSchedulingContext) (result *testContext) {
t.Helper()

tc := &testContext{}
@ -702,7 +702,7 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceCl
require.NoError(t, err, "create resource class")
}
for _, scheduling := range schedulings {
_, err := tc.client.ResourceV1alpha2().PodSchedulings(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{})
_, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{})
require.NoError(t, err, "create pod scheduling")
}
@ -69,7 +69,7 @@ const (
Node GVK = "Node"
PersistentVolume GVK = "PersistentVolume"
PersistentVolumeClaim GVK = "PersistentVolumeClaim"
PodScheduling GVK = "PodScheduling"
PodSchedulingContext GVK = "PodSchedulingContext"
ResourceClaim GVK = "ResourceClaim"
StorageClass GVK = "storage.k8s.io/StorageClass"
CSINode GVK = "storage.k8s.io/CSINode"
@ -925,22 +925,24 @@ func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourcev1alpha2.R
return wrapper
}

// PodSchedulingWrapper wraps a PodScheduling inside.
type PodSchedulingWrapper struct{ resourcev1alpha2.PodScheduling }

// MakePodScheduling creates a PodScheduling wrapper.
func MakePodScheduling() *PodSchedulingWrapper {
return &PodSchedulingWrapper{resourcev1alpha2.PodScheduling{}}
// PodSchedulingWrapper wraps a PodSchedulingContext inside.
type PodSchedulingWrapper struct {
resourcev1alpha2.PodSchedulingContext
}

// FromPodScheduling creates a PodScheduling wrapper from some existing object.
func FromPodScheduling(other *resourcev1alpha2.PodScheduling) *PodSchedulingWrapper {
// MakePodSchedulingContexts creates a PodSchedulingContext wrapper.
func MakePodSchedulingContexts() *PodSchedulingWrapper {
return &PodSchedulingWrapper{resourcev1alpha2.PodSchedulingContext{}}
}

// FromPodSchedulingContexts creates a PodSchedulingContext wrapper from some existing object.
func FromPodSchedulingContexts(other *resourcev1alpha2.PodSchedulingContext) *PodSchedulingWrapper {
return &PodSchedulingWrapper{*other.DeepCopy()}
}

// Obj returns the inner object.
func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha2.PodScheduling {
return &wrapper.PodScheduling
func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha2.PodSchedulingContext {
return &wrapper.PodSchedulingContext
}

// Name sets `s` as the name of the inner object.
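Typical use of the renamed wrappers mirrors the builder chains in the plugin test above. A sketch; the names, namespace and node are placeholders, and the st import path is assumed to be the scheduler's testing package:

package example

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// buildSchedulingContexts starts from an empty wrapper and derives variants
// with potential and selected nodes, as the plugin test does.
func buildSchedulingContexts() []*resourcev1alpha2.PodSchedulingContext {
	base := st.MakePodSchedulingContexts().Name("my-pod").Namespace("default").Obj()
	withPotential := st.FromPodSchedulingContexts(base).PotentialNodes("worker").Obj()
	withSelected := st.FromPodSchedulingContexts(withPotential).SelectedNode("worker").Obj()
	return []*resourcev1alpha2.PodSchedulingContext{base, withPotential, withSelected}
}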
@ -575,8 +575,8 @@ func ClusterRoles() []rbacv1.ClusterRole {
kubeSchedulerRules = append(kubeSchedulerRules,
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclaims", "resourceclasses").RuleOrDie(),
rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulings").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulings/status").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulingcontexts/status").RuleOrDie(),
)
}
roles = append(roles, rbacv1.ClusterRole{
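Spelled out, the ReadWrite rule above corresponds roughly to the following PolicyRule; the exact verb list behind the ReadWrite helper is an assumption here, not taken from this change:

package example

import rbacv1 "k8s.io/api/rbac/v1"

// podSchedulingContextsRule approximates the scheduler's access to the renamed resource.
var podSchedulingContextsRule = rbacv1.PolicyRule{
	APIGroups: []string{"resource.k8s.io"},
	Resources: []string{"podschedulingcontexts"},
	Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete"},
}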
@ -50,8 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimList{},
&ResourceClaimTemplate{},
&ResourceClaimTemplateList{},
&PodScheduling{},
&PodSchedulingList{},
&PodSchedulingContext{},
&PodSchedulingContextList{},
)

// Add common types
@ -181,28 +181,28 @@ type ResourceClaimList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.26

// PodScheduling objects hold information that is needed to schedule
// PodSchedulingContext objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
type PodScheduling struct {
type PodSchedulingContext struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// Spec describes where resources for the Pod are needed.
Spec PodSchedulingSpec `json:"spec" protobuf:"bytes,2,name=spec"`
Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"`

// Status describes where resources for the Pod can be allocated.
// +optional
Status PodSchedulingStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// PodSchedulingSpec describes where resources for the Pod are needed.
type PodSchedulingSpec struct {
// PodSchedulingContextSpec describes where resources for the Pod are needed.
type PodSchedulingContextSpec struct {
// SelectedNode is the node for which allocation of ResourceClaims that
// are referenced by the Pod and that use "WaitForFirstConsumer"
// allocation is to be attempted.
@ -221,8 +221,8 @@ type PodSchedulingSpec struct {
PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
}

// PodSchedulingStatus describes where resources for the Pod can be allocated.
type PodSchedulingStatus struct {
// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
type PodSchedulingContextStatus struct {
// ResourceClaims describes resource availability for each
// pod.spec.resourceClaim entry where the corresponding ResourceClaim
// uses "WaitForFirstConsumer" allocation mode.
@ -257,22 +257,22 @@ type ResourceClaimSchedulingStatus struct {
}

// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
// node lists that are stored in PodScheduling objects. This limit is part
// node lists that are stored in PodSchedulingContext objects. This limit is part
// of the API.
const PodSchedulingNodeListMaxSize = 128

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.26

// PodSchedulingList is a collection of Pod scheduling objects.
type PodSchedulingList struct {
// PodSchedulingContextList is a collection of Pod scheduling objects.
type PodSchedulingContextList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// Items is the list of PodScheduling objects.
Items []PodScheduling `json:"items" protobuf:"bytes,2,rep,name=items"`
// Items is the list of PodSchedulingContext objects.
Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +genclient
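Putting the renamed types together, a minimal object as the scheduler would create it, with a status entry as a driver would report it; names and nodes are placeholders:

package example

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePodSchedulingContext shows the spec written by the scheduler and the
// status filled in by a resource driver.
var examplePodSchedulingContext = &resourcev1alpha2.PodSchedulingContext{
	ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
	Spec: resourcev1alpha2.PodSchedulingContextSpec{
		SelectedNode:   "worker-1",
		PotentialNodes: []string{"worker-1", "worker-2"},
	},
	Status: resourcev1alpha2.PodSchedulingContextStatus{
		ResourceClaims: []resourcev1alpha2.ResourceClaimSchedulingStatus{
			{Name: "my-claim", UnsuitableNodes: []string{"worker-2"}},
		},
	},
}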
@ -139,9 +139,9 @@ type controller struct {
rcLister resourcev1alpha2listers.ResourceClassLister
rcSynced cache.InformerSynced
claimCache cache.MutationCache
podSchedulingLister resourcev1alpha2listers.PodSchedulingLister
schedulingCtxLister resourcev1alpha2listers.PodSchedulingContextLister
claimSynced cache.InformerSynced
podSchedulingSynced cache.InformerSynced
schedulingCtxSynced cache.InformerSynced
}

// TODO: make it configurable
@ -157,7 +157,7 @@ func New(
logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller")
rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses()
claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings()
schedulingCtxInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()

eventBroadcaster := record.NewBroadcaster()
go func() {
@ -177,7 +177,7 @@ func New(
eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme,
v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)})

// The work queue contains either keys for claims or PodScheduling objects.
// The work queue contains either keys for claims or PodSchedulingContext objects.
queue := workqueue.NewNamedRateLimitingQueue(
workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-queue", name))

@ -199,8 +199,8 @@ func New(
rcSynced: rcInformer.Informer().HasSynced,
claimCache: claimCache,
claimSynced: claimInformer.Informer().HasSynced,
podSchedulingLister: podSchedulingInformer.Lister(),
podSchedulingSynced: podSchedulingInformer.Informer().HasSynced,
schedulingCtxLister: schedulingCtxInformer.Lister(),
schedulingCtxSynced: schedulingCtxInformer.Informer().HasSynced,
queue: queue,
eventRecorder: eventRecorder,
}
@ -209,11 +209,11 @@ func New(
if loggerV6.Enabled() {
resourceClaimLogger := klog.LoggerWithValues(loggerV6, "type", "ResourceClaim")
_, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&resourceClaimLogger, ctrl))
podSchedulingLogger := klog.LoggerWithValues(loggerV6, "type", "PodScheduling")
_, _ = podSchedulingInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&podSchedulingLogger, ctrl))
schedulingCtxLogger := klog.LoggerWithValues(loggerV6, "type", "PodSchedulingContext")
_, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&schedulingCtxLogger, ctrl))
} else {
_, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
_, _ = podSchedulingInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
_, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
}

return ctrl
@ -233,7 +233,7 @@ func resourceEventHandlerFuncs(logger *klog.Logger, ctrl *controller) cache.Reso

const (
claimKeyPrefix = "claim:"
podSchedulingKeyPrefix = "podscheduling:"
schedulingCtxKeyPrefix = "schedulingCtx:"
)

func (ctrl *controller) add(logger *klog.Logger, obj interface{}) {
@ -279,8 +279,8 @@ func getKey(obj interface{}) (string, error) {
switch obj.(type) {
case *resourcev1alpha2.ResourceClaim:
prefix = claimKeyPrefix
case *resourcev1alpha2.PodScheduling:
prefix = podSchedulingKeyPrefix
case *resourcev1alpha2.PodSchedulingContext:
prefix = schedulingCtxKeyPrefix
default:
return "", fmt.Errorf("unexpected object: %T", obj)
}
@ -297,7 +297,7 @@ func (ctrl *controller) Run(workers int) {

stopCh := ctrl.ctx.Done()

if !cache.WaitForCacheSync(stopCh, ctrl.rcSynced, ctrl.claimSynced, ctrl.podSchedulingSynced) {
if !cache.WaitForCacheSync(stopCh, ctrl.rcSynced, ctrl.claimSynced, ctrl.schedulingCtxSynced) {
ctrl.logger.Error(nil, "Cannot sync caches")
return
}
@ -370,16 +370,16 @@ func (ctrl *controller) syncKey(ctx context.Context, key string) (obj runtime.Ob
return nil, err
}
obj, finalErr = claim, ctrl.syncClaim(ctx, claim)
case podSchedulingKeyPrefix:
podScheduling, err := ctrl.podSchedulingLister.PodSchedulings(namespace).Get(name)
case schedulingCtxKeyPrefix:
schedulingCtx, err := ctrl.schedulingCtxLister.PodSchedulingContexts(namespace).Get(name)
if err != nil {
if k8serrors.IsNotFound(err) {
klog.FromContext(ctx).V(5).Info("PodScheduling was deleted, no need to process it")
klog.FromContext(ctx).V(5).Info("PodSchedulingContext was deleted, no need to process it")
return nil, nil
}
return nil, err
}
obj, finalErr = podScheduling, ctrl.syncPodScheduling(ctx, podScheduling)
obj, finalErr = schedulingCtx, ctrl.syncPodSchedulingContexts(ctx, schedulingCtx)
}
return
}
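Written out as a small helper, the queue-key scheme above produces keys such as "schedulingCtx:default/pod"; the use of cache.MetaNamespaceKeyFunc for the "namespace/name" part is the only assumption beyond the prefixes shown:

package example

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/client-go/tools/cache"
)

// keyFor builds work-queue keys of the form "<prefix><namespace>/<name>".
func keyFor(obj interface{}) (string, error) {
	var prefix string
	switch obj.(type) {
	case *resourcev1alpha2.ResourceClaim:
		prefix = "claim:"
	case *resourcev1alpha2.PodSchedulingContext:
		prefix = "schedulingCtx:"
	default:
		return "", fmt.Errorf("unexpected object: %T", obj)
	}
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		return "", err
	}
	return prefix + key, nil
}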
@ -525,9 +525,9 @@ func (ctrl *controller) allocateClaim(ctx context.Context,
logger := klog.FromContext(ctx)

if claim.Status.Allocation != nil {
// This can happen when two PodScheduling objects trigger
// This can happen when two PodSchedulingContext objects trigger
// allocation attempts (first one wins) or when we see the
// update of the PodScheduling object.
// update of the PodSchedulingContext object.
logger.V(5).Info("Claim already allocated, nothing to do")
return nil
}
@ -601,19 +601,19 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim
}, nil
}
// syncClaim determines which next action may be needed for a PodScheduling object
// syncPodSchedulingContexts determines which next action may be needed for a PodSchedulingContext object
// and does it.
func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *resourcev1alpha2.PodScheduling) error {
func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
logger := klog.FromContext(ctx)

// Ignore deleted objects.
if podScheduling.DeletionTimestamp != nil {
logger.V(5).Info("PodScheduling marked for deletion")
if schedulingCtx.DeletionTimestamp != nil {
logger.V(5).Info("PodSchedulingContext marked for deletion")
return nil
}

if podScheduling.Spec.SelectedNode == "" &&
len(podScheduling.Spec.PotentialNodes) == 0 {
if schedulingCtx.Spec.SelectedNode == "" &&
len(schedulingCtx.Spec.PotentialNodes) == 0 {
// Nothing to do? Shouldn't occur.
logger.V(5).Info("Waiting for scheduler to set fields")
return nil
@ -621,8 +621,8 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re

// Check pod.
// TODO (?): use an informer - only useful when many (most?) pods have claims
// TODO (?): let the scheduler copy all claim names + UIDs into PodScheduling - then we don't need the pod
pod, err := ctrl.kubeClient.CoreV1().Pods(podScheduling.Namespace).Get(ctx, podScheduling.Name, metav1.GetOptions{})
// TODO (?): let the scheduler copy all claim names + UIDs into PodSchedulingContext - then we don't need the pod
pod, err := ctrl.kubeClient.CoreV1().Pods(schedulingCtx.Namespace).Get(ctx, schedulingCtx.Name, metav1.GetOptions{})
if err != nil {
return err
}
@ -632,16 +632,16 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
}

// Still the owner?
if !metav1.IsControlledBy(podScheduling, pod) {
if !metav1.IsControlledBy(schedulingCtx, pod) {
// Must be obsolete object, do nothing for it.
logger.V(5).Info("Pod not owner, PodScheduling is obsolete")
logger.V(5).Info("Pod not owner, PodSchedulingContext is obsolete")
return nil
}

// Find all pending claims that are owned by us. We bail out if any of the pre-requisites
// for pod scheduling (claims exist, classes exist, parameters exist) are not met.
// The scheduler will do the same, except for checking parameters, so usually
// everything should be ready once the PodScheduling object exists.
// everything should be ready once the PodSchedulingContext object exists.
var claims claimAllocations
for _, podClaim := range pod.Spec.ResourceClaims {
delayed, err := ctrl.checkPodClaim(ctx, pod, podClaim)
@ -665,12 +665,12 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
// and shouldn't, because those allocations might have to be undone to
// pick a better node. If we don't need to allocate now, then we'll
// simply report back the gathered information.
if len(podScheduling.Spec.PotentialNodes) > 0 {
if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, podScheduling.Spec.PotentialNodes); err != nil {
if len(schedulingCtx.Spec.PotentialNodes) > 0 {
if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, schedulingCtx.Spec.PotentialNodes); err != nil {
return fmt.Errorf("checking potential nodes: %v", err)
}
}
selectedNode := podScheduling.Spec.SelectedNode
selectedNode := schedulingCtx.Spec.SelectedNode
logger.V(5).Info("pending pod claims", "claims", claims, "selectedNode", selectedNode)
if selectedNode != "" {
unsuitable := false
@ -703,26 +703,26 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
// TODO: replace with patching the array. We can do that without race conditions
// because each driver is responsible for its own entries.
modified := false
podScheduling = podScheduling.DeepCopy()
schedulingCtx = schedulingCtx.DeepCopy()
for _, delayed := range claims {
i := findClaim(podScheduling.Status.ResourceClaims, delayed.PodClaimName)
i := findClaim(schedulingCtx.Status.ResourceClaims, delayed.PodClaimName)
if i < 0 {
// Add new entry.
podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims,
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resourcev1alpha2.ResourceClaimSchedulingStatus{
Name: delayed.PodClaimName,
UnsuitableNodes: delayed.UnsuitableNodes,
})
modified = true
} else if stringsDiffer(podScheduling.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) {
} else if stringsDiffer(schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) {
// Update existing entry.
podScheduling.Status.ResourceClaims[i].UnsuitableNodes = delayed.UnsuitableNodes
schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes = delayed.UnsuitableNodes
modified = true
}
}
if modified {
logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podScheduling", podScheduling)
if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).UpdateStatus(ctx, podScheduling, metav1.UpdateOptions{}); err != nil {
logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podSchedulingCtx", schedulingCtx)
if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("update unsuitable node status: %v", err)
}
}
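Condensed, the driver-side write above boils down to appending a per-claim entry and calling UpdateStatus. A sketch with placeholder client, claim name and node list:

package example

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// reportUnsuitableNodes records which nodes cannot satisfy one pod claim and
// writes the status back through the generated clientset.
func reportUnsuitableNodes(ctx context.Context, client kubernetes.Interface, sc *resourcev1alpha2.PodSchedulingContext, podClaimName string, unsuitable []string) error {
	sc = sc.DeepCopy()
	sc.Status.ResourceClaims = append(sc.Status.ResourceClaims,
		resourcev1alpha2.ResourceClaimSchedulingStatus{
			Name:            podClaimName,
			UnsuitableNodes: unsuitable,
		})
	_, err := client.ResourceV1alpha2().PodSchedulingContexts(sc.Namespace).UpdateStatus(ctx, sc, metav1.UpdateOptions{})
	return err
}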
@ -55,10 +55,10 @@ func TestController(t *testing.T) {
delayedClaim := claim.DeepCopy()
delayedClaim.Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer
podName := "pod"
podKey := "podscheduling:default/pod"
podKey := "schedulingCtx:default/pod"
pod := createPod(podName, claimNamespace, nil)
podClaimName := "my-pod-claim"
podScheduling := createPodScheduling(pod)
podSchedulingCtx := createPodSchedulingContexts(pod)
podWithClaim := createPod(podName, claimNamespace, map[string]string{podClaimName: claimName})
nodeName := "worker"
otherNodeName := "worker-2"
@ -96,22 +96,22 @@ func TestController(t *testing.T) {
claim.Status.DeallocationRequested = true
return claim
}
withSelectedNode := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
podScheduling = podScheduling.DeepCopy()
podScheduling.Spec.SelectedNode = nodeName
return podScheduling
withSelectedNode := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
podSchedulingCtx = podSchedulingCtx.DeepCopy()
podSchedulingCtx.Spec.SelectedNode = nodeName
return podSchedulingCtx
}
withUnsuitableNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
podScheduling = podScheduling.DeepCopy()
podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims,
withUnsuitableNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
podSchedulingCtx = podSchedulingCtx.DeepCopy()
podSchedulingCtx.Status.ResourceClaims = append(podSchedulingCtx.Status.ResourceClaims,
resourcev1alpha2.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes},
)
return podScheduling
return podSchedulingCtx
}
withPotentialNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
podScheduling = podScheduling.DeepCopy()
podScheduling.Spec.PotentialNodes = potentialNodes
return podScheduling
withPotentialNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
podSchedulingCtx = podSchedulingCtx.DeepCopy()
podSchedulingCtx.Spec.PotentialNodes = potentialNodes
return podSchedulingCtx
}
var m mockDriver
@ -121,7 +121,7 @@ func TestController(t *testing.T) {
driver mockDriver
classes []*resourcev1alpha2.ResourceClass
pod *corev1.Pod
podScheduling, expectedPodScheduling *resourcev1alpha2.PodScheduling
schedulingCtx, expectedSchedulingCtx *resourcev1alpha2.PodSchedulingContext
claim, expectedClaim *resourcev1alpha2.ResourceClaim
expectedError string
}{
@ -308,8 +308,8 @@ func TestController(t *testing.T) {
"pod-nop": {
key: podKey,
pod: pod,
podScheduling: withSelectedNode(podScheduling),
expectedPodScheduling: withSelectedNode(podScheduling),
schedulingCtx: withSelectedNode(podSchedulingCtx),
expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
expectedError: errPeriodic.Error(),
},

@ -319,8 +319,8 @@ func TestController(t *testing.T) {
claim: claim,
expectedClaim: claim,
pod: podWithClaim,
podScheduling: withSelectedNode(podScheduling),
expectedPodScheduling: withSelectedNode(podScheduling),
schedulingCtx: withSelectedNode(podSchedulingCtx),
expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
expectedError: errPeriodic.Error(),
},

@ -331,8 +331,8 @@ func TestController(t *testing.T) {
claim: delayedClaim,
expectedClaim: delayedClaim,
pod: podWithClaim,
podScheduling: podScheduling,
expectedPodScheduling: podScheduling,
schedulingCtx: podSchedulingCtx,
expectedSchedulingCtx: podSchedulingCtx,
},

// pod with delayed allocation, potential nodes -> provide unsuitable nodes
@ -342,11 +342,11 @@ func TestController(t *testing.T) {
claim: delayedClaim,
expectedClaim: delayedClaim,
pod: podWithClaim,
podScheduling: withPotentialNodes(podScheduling),
schedulingCtx: withPotentialNodes(podSchedulingCtx),
driver: m.expectClassParameters(map[string]interface{}{className: 1}).
expectClaimParameters(map[string]interface{}{claimName: 2}).
expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil),
expectedPodScheduling: withUnsuitableNodes(withPotentialNodes(podScheduling)),
expectedSchedulingCtx: withUnsuitableNodes(withPotentialNodes(podSchedulingCtx)),
expectedError: errPeriodic.Error(),
},

@ -356,8 +356,8 @@ func TestController(t *testing.T) {
claim: delayedClaim,
expectedClaim: delayedClaim,
pod: podWithClaim,
podScheduling: withSelectedNode(withPotentialNodes(podScheduling)),
expectedPodScheduling: withSelectedNode(withPotentialNodes(podScheduling)),
schedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
expectedSchedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
expectedError: `pod claim my-pod-claim: resourceclass.resource.k8s.io "mock-class" not found`,
},

@ -368,12 +368,12 @@ func TestController(t *testing.T) {
claim: delayedClaim,
expectedClaim: withReservedFor(withAllocate(delayedClaim), pod),
pod: podWithClaim,
podScheduling: withSelectedNode(withPotentialNodes(podScheduling)),
schedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
driver: m.expectClassParameters(map[string]interface{}{className: 1}).
expectClaimParameters(map[string]interface{}{claimName: 2}).
expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil).
expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}),
expectedPodScheduling: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podScheduling))),
expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))),
expectedError: errPeriodic.Error(),
},
} {
@ -388,8 +388,8 @@ func TestController(t *testing.T) {
if test.pod != nil {
initialObjects = append(initialObjects, test.pod)
}
if test.podScheduling != nil {
initialObjects = append(initialObjects, test.podScheduling)
if test.schedulingCtx != nil {
initialObjects = append(initialObjects, test.schedulingCtx)
}
if test.claim != nil {
initialObjects = append(initialObjects, test.claim)
@ -398,7 +398,7 @@ func TestController(t *testing.T) {
rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses()
claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
podInformer := informerFactory.Core().V1().Pods()
podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings()
podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
// Order is important: on function exit, we first must
// cancel, then wait (last-in-first-out).
defer informerFactory.Shutdown()
@ -412,7 +412,7 @@ func TestController(t *testing.T) {
require.NoError(t, claimInformer.Informer().GetStore().Add(obj), "add resource claim")
case *corev1.Pod:
require.NoError(t, podInformer.Informer().GetStore().Add(obj), "add pod")
case *resourcev1alpha2.PodScheduling:
case *resourcev1alpha2.PodSchedulingContext:
require.NoError(t, podSchedulingInformer.Informer().GetStore().Add(obj), "add pod scheduling")
default:
t.Fatalf("unknown initialObject type: %+v", obj)
@ -427,7 +427,7 @@ func TestController(t *testing.T) {
if !cache.WaitForCacheSync(ctx.Done(),
informerFactory.Resource().V1alpha2().ResourceClasses().Informer().HasSynced,
informerFactory.Resource().V1alpha2().ResourceClaims().Informer().HasSynced,
informerFactory.Resource().V1alpha2().PodSchedulings().Informer().HasSynced,
informerFactory.Resource().V1alpha2().PodSchedulingContexts().Informer().HasSynced,
) {
t.Fatal("could not sync caches")
}
@ -449,11 +449,11 @@ func TestController(t *testing.T) {
}
assert.Equal(t, expectedClaims, claims.Items)

podSchedulings, err := kubeClient.ResourceV1alpha2().PodSchedulings("").List(ctx, metav1.ListOptions{})
podSchedulings, err := kubeClient.ResourceV1alpha2().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
require.NoError(t, err, "list pod schedulings")
var expectedPodSchedulings []resourcev1alpha2.PodScheduling
if test.expectedPodScheduling != nil {
expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedPodScheduling)
var expectedPodSchedulings []resourcev1alpha2.PodSchedulingContext
if test.expectedSchedulingCtx != nil {
expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedSchedulingCtx)
}
assert.Equal(t, expectedPodSchedulings, podSchedulings.Items)

@ -620,9 +620,9 @@ func createPod(podName, podNamespace string, claims map[string]string) *corev1.P
return pod
|
||||
}
|
||||
|
||||
func createPodScheduling(pod *corev1.Pod) *resourcev1alpha2.PodScheduling {
|
||||
func createPodSchedulingContexts(pod *corev1.Pod) *resourcev1alpha2.PodSchedulingContext {
|
||||
controller := true
|
||||
return &resourcev1alpha2.PodScheduling{
|
||||
return &resourcev1alpha2.PodSchedulingContext{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
|
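
Beyond the type itself, the generated accessors change name in lockstep, as the hunks above show: the shared informer factory's PodSchedulings() becomes PodSchedulingContexts(), and the typed client's ResourceV1alpha2().PodSchedulings(namespace) becomes ResourceV1alpha2().PodSchedulingContexts(namespace). A minimal, self-contained sketch of that usage, assuming client-go's fake clientset and the standard generated informer factory; object names like "my-pod" and "node1" are illustrative, not taken from the patch:

package main

import (
	"context"
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// A fake clientset stands in for a real cluster connection in this sketch.
	kubeClient := fake.NewSimpleClientset()

	// Create one PodSchedulingContext through the renamed typed client.
	_, err := kubeClient.ResourceV1alpha2().PodSchedulingContexts("default").Create(ctx,
		&resourcev1alpha2.PodSchedulingContext{
			ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
			Spec:       resourcev1alpha2.PodSchedulingContextSpec{SelectedNode: "node1"},
		}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}

	// The informer factory follows the same naming.
	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
	schedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()

	informerFactory.Start(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), schedulingInformer.Informer().HasSynced) {
		panic("cache did not sync")
	}

	// Listing also goes through the new resource name.
	list, err := kubeClient.ResourceV1alpha2().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("PodSchedulingContexts:", len(list.Items))
}
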
@ -64,7 +64,7 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulings.
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts.
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition
@ -88,10 +88,10 @@ var noConflicts = map[string]struct{}{
// namespaces only have a spec.finalizers field which is also skipped,
// thus it will never have a conflict.
"namespaces": {},
// podschedulings.status only has a list which contains items with a list,
// podschedulingcontexts.status only has a list which contains items with a list,
// therefore apply works because it simply merges either the outer or
// the inner list.
"podschedulings": {},
"podschedulingcontexts": {},
}
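
The noConflicts comment hinges on the shape of the status: PodSchedulingContextStatus carries an outer list of per-claim entries, and each entry carries an inner list of unsuitable nodes, so the two test payloads (node1 from statusData, node2 from resetFieldsStatusData) merge instead of conflicting. A rough sketch of that nested shape with the test's values, assuming the generated v1alpha2 Go types:

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

func main() {
	// Outer list: one entry per pod.spec.resourceClaims entry.
	// Inner list: the nodes found unsuitable for that claim.
	// After both test payloads are applied on top of each other,
	// both node1 and node2 are kept rather than reported as a conflict.
	merged := resourcev1alpha2.PodSchedulingContextStatus{
		ResourceClaims: []resourcev1alpha2.ResourceClaimSchedulingStatus{
			{
				Name:            "my-claim",
				UnsuitableNodes: []string{"node1", "node2"},
			},
		},
	}
	fmt.Printf("%+v\n", merged)
}
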

var image2 = image.GetE2EImage(image.Etcd)
@ -148,7 +148,7 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`,
gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"spec": {"selectedNode": "node2name"}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,

@ -54,7 +54,7 @@ var statusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition

@ -469,9 +469,9 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`,
ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name",
},
gvr("resource.k8s.io", "v1alpha2", "podschedulings"): {
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): {
Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`,
ExpectedEtcdPath: "/registry/podschedulings/" + namespace + "/pod1name",
ExpectedEtcdPath: "/registry/podschedulingcontexts/" + namespace + "/pod1name",
},
// --
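
The rename also shows up in storage: the stub above is now expected under /registry/podschedulingcontexts/<namespace>/pod1name instead of /registry/podschedulings/…. For orientation only, a hedged sketch of the typed object that corresponds to that stub, using the v1alpha2 spec fields shown elsewhere in this change (SelectedNode, PotentialNodes):

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Typed equivalent of the etcd storage test stub; after the rename the
	// apiserver persists it under /registry/podschedulingcontexts/<namespace>/pod1name.
	obj := resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: "pod1name"},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode:   "node1name",
			PotentialNodes: []string{"node1name", "node2name"},
		},
	}
	fmt.Printf("%+v\n", obj)
}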