Merge pull request #116556 from pohly/dra-podschedulingcontext
dra: PodScheduling -> PodSchedulingContext
@@ -58,8 +58,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&ResourceClaimList{},
 		&ResourceClaimTemplate{},
 		&ResourceClaimTemplateList{},
-		&PodScheduling{},
-		&PodSchedulingList{},
+		&PodSchedulingContext{},
+		&PodSchedulingContextList{},
 	)
 
 	return nil
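Illustrative sketch only, not part of this commit: what the renamed registrations enable. It assumes the published k8s.io/api/resource/v1alpha2 package from the same release and the resource.k8s.io group name; both are assumptions based on the package paths visible elsewhere in this diff.

```go
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "resource.k8s.io", Version: "v1alpha2"}

	// Mirrors what addKnownTypes now registers for the renamed kinds.
	scheme.AddKnownTypes(gv,
		&resourcev1alpha2.PodSchedulingContext{},
		&resourcev1alpha2.PodSchedulingContextList{},
	)

	// After registration the scheme can resolve the kind for an object.
	kinds, _, err := scheme.ObjectKinds(&resourcev1alpha2.PodSchedulingContext{})
	if err != nil {
		panic(err)
	}
	fmt.Println(kinds[0].Kind) // PodSchedulingContext
}
```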
@@ -173,27 +173,27 @@ type ResourceClaimList struct {
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// PodScheduling objects hold information that is needed to schedule
+// PodSchedulingContext objects hold information that is needed to schedule
 // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
 // mode.
 //
 // This is an alpha type and requires enabling the DynamicResourceAllocation
 // feature gate.
-type PodScheduling struct {
+type PodSchedulingContext struct {
 	metav1.TypeMeta
 	// Standard object metadata
 	// +optional
 	metav1.ObjectMeta
 
 	// Spec describes where resources for the Pod are needed.
-	Spec PodSchedulingSpec
+	Spec PodSchedulingContextSpec
 
 	// Status describes where resources for the Pod can be allocated.
-	Status PodSchedulingStatus
+	Status PodSchedulingContextStatus
 }
 
-// PodSchedulingSpec describes where resources for the Pod are needed.
-type PodSchedulingSpec struct {
+// PodSchedulingContextSpec describes where resources for the Pod are needed.
+type PodSchedulingContextSpec struct {
 	// SelectedNode is the node for which allocation of ResourceClaims that
 	// are referenced by the Pod and that use "WaitForFirstConsumer"
 	// allocation is to be attempted.
@@ -209,8 +209,8 @@ type PodSchedulingSpec struct {
 	PotentialNodes []string
 }
 
-// PodSchedulingStatus describes where resources for the Pod can be allocated.
-type PodSchedulingStatus struct {
+// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
+type PodSchedulingContextStatus struct {
 	// ResourceClaims describes resource availability for each
 	// pod.spec.resourceClaim entry where the corresponding ResourceClaim
 	// uses "WaitForFirstConsumer" allocation mode.
@@ -239,21 +239,21 @@ type ResourceClaimSchedulingStatus struct {
 }
 
 // PodSchedulingNodeListMaxSize defines the maximum number of entries in the
-// node lists that are stored in PodScheduling objects. This limit is part
+// node lists that are stored in PodSchedulingContext objects. This limit is part
 // of the API.
 const PodSchedulingNodeListMaxSize = 128
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
-// PodSchedulingList is a collection of Pod scheduling objects.
-type PodSchedulingList struct {
+// PodSchedulingContextList is a collection of Pod scheduling objects.
+type PodSchedulingContextList struct {
 	metav1.TypeMeta
 	// Standard list metadata
 	// +optional
 	metav1.ListMeta
 
-	// Items is the list of PodScheduling objects.
-	Items []PodScheduling
+	// Items is the list of PodSchedulingContext objects.
+	Items []PodSchedulingContext
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
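The renamed types above are plain structs. As a minimal sketch (assuming the published k8s.io/api/resource/v1alpha2 mirror of these internal types), this is how a PodSchedulingContext is populated, using only fields that appear in this diff:

```go
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	schedCtx := &resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode:   "worker1",
			PotentialNodes: []string{"worker1", "worker2"},
		},
	}

	// The status is normally filled in by DRA drivers; it is set here only to
	// illustrate the renamed status type and its claim entries.
	schedCtx.Status = resourcev1alpha2.PodSchedulingContextStatus{
		ResourceClaims: []resourcev1alpha2.ResourceClaimSchedulingStatus{
			{Name: "my-claim", UnsuitableNodes: []string{"worker2"}},
		},
	}

	fmt.Printf("%s: selected node %q\n", schedCtx.Name, schedCtx.Spec.SelectedNode)
}
```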
pkg/apis/resource/v1alpha2/zz_generated.conversion.go (generated, 108 changes)
@@ -50,43 +50,43 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodScheduling)(nil), (*resource.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha2_PodScheduling_To_resource_PodScheduling(a.(*v1alpha2.PodScheduling), b.(*resource.PodScheduling), scope)
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContext)(nil), (*resource.PodSchedulingContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(a.(*v1alpha2.PodSchedulingContext), b.(*resource.PodSchedulingContext), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*resource.PodScheduling)(nil), (*v1alpha2.PodScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_resource_PodScheduling_To_v1alpha2_PodScheduling(a.(*resource.PodScheduling), b.(*v1alpha2.PodScheduling), scope)
+	if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContext)(nil), (*v1alpha2.PodSchedulingContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(a.(*resource.PodSchedulingContext), b.(*v1alpha2.PodSchedulingContext), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingList)(nil), (*resource.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(a.(*v1alpha2.PodSchedulingList), b.(*resource.PodSchedulingList), scope)
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContextList)(nil), (*resource.PodSchedulingContextList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(a.(*v1alpha2.PodSchedulingContextList), b.(*resource.PodSchedulingContextList), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingList)(nil), (*v1alpha2.PodSchedulingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(a.(*resource.PodSchedulingList), b.(*v1alpha2.PodSchedulingList), scope)
+	if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextList)(nil), (*v1alpha2.PodSchedulingContextList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(a.(*resource.PodSchedulingContextList), b.(*v1alpha2.PodSchedulingContextList), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingSpec)(nil), (*resource.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(a.(*v1alpha2.PodSchedulingSpec), b.(*resource.PodSchedulingSpec), scope)
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContextSpec)(nil), (*resource.PodSchedulingContextSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(a.(*v1alpha2.PodSchedulingContextSpec), b.(*resource.PodSchedulingContextSpec), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingSpec)(nil), (*v1alpha2.PodSchedulingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(a.(*resource.PodSchedulingSpec), b.(*v1alpha2.PodSchedulingSpec), scope)
+	if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextSpec)(nil), (*v1alpha2.PodSchedulingContextSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(a.(*resource.PodSchedulingContextSpec), b.(*v1alpha2.PodSchedulingContextSpec), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingStatus)(nil), (*resource.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(a.(*v1alpha2.PodSchedulingStatus), b.(*resource.PodSchedulingStatus), scope)
+	if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContextStatus)(nil), (*resource.PodSchedulingContextStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(a.(*v1alpha2.PodSchedulingContextStatus), b.(*resource.PodSchedulingContextStatus), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingStatus)(nil), (*v1alpha2.PodSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(a.(*resource.PodSchedulingStatus), b.(*v1alpha2.PodSchedulingStatus), scope)
+	if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextStatus)(nil), (*v1alpha2.PodSchedulingContextStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(a.(*resource.PodSchedulingContextStatus), b.(*v1alpha2.PodSchedulingContextStatus), scope)
 	}); err != nil {
 		return err
 	}
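A sketch of what the registrations above are for, assuming the code is built inside the kubernetes/kubernetes tree where the internal resource package is importable. The scope argument is passed as nil purely for illustration, since these particular generated conversions do not use it.

```go
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	resourceinternal "k8s.io/kubernetes/pkg/apis/resource"
	internalv1alpha2 "k8s.io/kubernetes/pkg/apis/resource/v1alpha2"
)

func main() {
	// Versioned (v1alpha2) object as it would arrive from the API.
	in := &resourcev1alpha2.PodSchedulingContext{}
	in.Spec.SelectedNode = "worker1"
	in.Spec.PotentialNodes = []string{"worker1", "worker2"}

	// Convert to the internal representation using one of the renamed,
	// generated conversion functions registered above.
	out := &resourceinternal.PodSchedulingContext{}
	if err := internalv1alpha2.Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.SelectedNode, out.Spec.PotentialNodes)
}
```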
@@ -247,100 +247,100 @@ func Convert_resource_AllocationResult_To_v1alpha2_AllocationResult(in *resource
|
||||
return autoConvert_resource_AllocationResult_To_v1alpha2_AllocationResult(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_PodScheduling_To_resource_PodScheduling(in *v1alpha2.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error {
|
||||
func autoConvert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in *v1alpha2.PodSchedulingContext, out *resource.PodSchedulingContext, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
if err := Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil {
|
||||
if err := Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_PodScheduling_To_resource_PodScheduling is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodScheduling_To_resource_PodScheduling(in *v1alpha2.PodScheduling, out *resource.PodScheduling, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodScheduling_To_resource_PodScheduling(in, out, s)
|
||||
// Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in *v1alpha2.PodSchedulingContext, out *resource.PodSchedulingContext, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_resource_PodScheduling_To_v1alpha2_PodScheduling(in *resource.PodScheduling, out *v1alpha2.PodScheduling, s conversion.Scope) error {
|
||||
func autoConvert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(in *resource.PodSchedulingContext, out *v1alpha2.PodSchedulingContext, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
if err := Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(&in.Status, &out.Status, s); err != nil {
|
||||
if err := Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_resource_PodScheduling_To_v1alpha2_PodScheduling is an autogenerated conversion function.
|
||||
func Convert_resource_PodScheduling_To_v1alpha2_PodScheduling(in *resource.PodScheduling, out *v1alpha2.PodScheduling, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodScheduling_To_v1alpha2_PodScheduling(in, out, s)
|
||||
// Convert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext is an autogenerated conversion function.
|
||||
func Convert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(in *resource.PodSchedulingContext, out *v1alpha2.PodSchedulingContext, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodSchedulingContext_To_v1alpha2_PodSchedulingContext(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha2.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error {
|
||||
func autoConvert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(in *v1alpha2.PodSchedulingContextList, out *resource.PodSchedulingContextList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]resource.PodScheduling)(unsafe.Pointer(&in.Items))
|
||||
out.Items = *(*[]resource.PodSchedulingContext)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in *v1alpha2.PodSchedulingList, out *resource.PodSchedulingList, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodSchedulingList_To_resource_PodSchedulingList(in, out, s)
|
||||
// Convert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(in *v1alpha2.PodSchedulingContextList, out *resource.PodSchedulingContextList, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodSchedulingContextList_To_resource_PodSchedulingContextList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha2.PodSchedulingList, s conversion.Scope) error {
|
||||
func autoConvert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(in *resource.PodSchedulingContextList, out *v1alpha2.PodSchedulingContextList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]v1alpha2.PodScheduling)(unsafe.Pointer(&in.Items))
|
||||
out.Items = *(*[]v1alpha2.PodSchedulingContext)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList is an autogenerated conversion function.
|
||||
func Convert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in *resource.PodSchedulingList, out *v1alpha2.PodSchedulingList, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodSchedulingList_To_v1alpha2_PodSchedulingList(in, out, s)
|
||||
// Convert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList is an autogenerated conversion function.
|
||||
func Convert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(in *resource.PodSchedulingContextList, out *v1alpha2.PodSchedulingContextList, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodSchedulingContextList_To_v1alpha2_PodSchedulingContextList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha2.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error {
|
||||
func autoConvert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in *v1alpha2.PodSchedulingContextSpec, out *resource.PodSchedulingContextSpec, s conversion.Scope) error {
|
||||
out.SelectedNode = in.SelectedNode
|
||||
out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in *v1alpha2.PodSchedulingSpec, out *resource.PodSchedulingSpec, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodSchedulingSpec_To_resource_PodSchedulingSpec(in, out, s)
|
||||
// Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in *v1alpha2.PodSchedulingContextSpec, out *resource.PodSchedulingContextSpec, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha2.PodSchedulingSpec, s conversion.Scope) error {
|
||||
func autoConvert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(in *resource.PodSchedulingContextSpec, out *v1alpha2.PodSchedulingContextSpec, s conversion.Scope) error {
|
||||
out.SelectedNode = in.SelectedNode
|
||||
out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec is an autogenerated conversion function.
|
||||
func Convert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in *resource.PodSchedulingSpec, out *v1alpha2.PodSchedulingSpec, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodSchedulingSpec_To_v1alpha2_PodSchedulingSpec(in, out, s)
|
||||
// Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec is an autogenerated conversion function.
|
||||
func Convert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(in *resource.PodSchedulingContextSpec, out *v1alpha2.PodSchedulingContextSpec, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodSchedulingContextSpec_To_v1alpha2_PodSchedulingContextSpec(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha2.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error {
|
||||
func autoConvert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in *v1alpha2.PodSchedulingContextStatus, out *resource.PodSchedulingContextStatus, s conversion.Scope) error {
|
||||
out.ResourceClaims = *(*[]resource.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in *v1alpha2.PodSchedulingStatus, out *resource.PodSchedulingStatus, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodSchedulingStatus_To_resource_PodSchedulingStatus(in, out, s)
|
||||
// Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in *v1alpha2.PodSchedulingContextStatus, out *resource.PodSchedulingContextStatus, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha2.PodSchedulingStatus, s conversion.Scope) error {
|
||||
func autoConvert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(in *resource.PodSchedulingContextStatus, out *v1alpha2.PodSchedulingContextStatus, s conversion.Scope) error {
|
||||
out.ResourceClaims = *(*[]v1alpha2.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus is an autogenerated conversion function.
|
||||
func Convert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in *resource.PodSchedulingStatus, out *v1alpha2.PodSchedulingStatus, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodSchedulingStatus_To_v1alpha2_PodSchedulingStatus(in, out, s)
|
||||
// Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus is an autogenerated conversion function.
|
||||
func Convert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(in *resource.PodSchedulingContextStatus, out *v1alpha2.PodSchedulingContextStatus, s conversion.Scope) error {
|
||||
return autoConvert_resource_PodSchedulingContextStatus_To_v1alpha2_PodSchedulingContextStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_ResourceClaim_To_resource_ResourceClaim(in *v1alpha2.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error {
|
||||
|
@@ -253,33 +253,33 @@ func validateResourceClaimConsumers(consumers []resource.ResourceClaimConsumerRe
 	return allErrs
 }
 
-// ValidatePodScheduling validates a PodScheduling.
-func ValidatePodScheduling(resourceClaim *resource.PodScheduling) field.ErrorList {
-	allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
-	allErrs = append(allErrs, validatePodSchedulingSpec(&resourceClaim.Spec, field.NewPath("spec"))...)
+// ValidatePodSchedulingContext validates a PodSchedulingContext.
+func ValidatePodSchedulingContexts(schedulingCtx *resource.PodSchedulingContext) field.ErrorList {
+	allErrs := corevalidation.ValidateObjectMeta(&schedulingCtx.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
+	allErrs = append(allErrs, validatePodSchedulingSpec(&schedulingCtx.Spec, field.NewPath("spec"))...)
 	return allErrs
 }
 
-func validatePodSchedulingSpec(spec *resource.PodSchedulingSpec, fldPath *field.Path) field.ErrorList {
+func validatePodSchedulingSpec(spec *resource.PodSchedulingContextSpec, fldPath *field.Path) field.ErrorList {
 	allErrs := validateSliceIsASet(spec.PotentialNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, fldPath.Child("potentialNodes"))
 	return allErrs
 }
 
-// ValidatePodSchedulingUpdate tests if an update to PodScheduling is valid.
-func ValidatePodSchedulingUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList {
-	allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidatePodScheduling(resourceClaim)...)
+// ValidatePodSchedulingContextUpdate tests if an update to PodSchedulingContext is valid.
+func ValidatePodSchedulingContextUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList {
+	allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidatePodSchedulingContexts(schedulingCtx)...)
 	return allErrs
 }
 
-// ValidatePodSchedulingStatusUpdate tests if an update to the status of a PodScheduling is valid.
-func ValidatePodSchedulingStatusUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList {
-	allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, validatePodSchedulingStatus(&resourceClaim.Status, field.NewPath("status"))...)
+// ValidatePodSchedulingContextStatusUpdate tests if an update to the status of a PodSchedulingContext is valid.
+func ValidatePodSchedulingContextStatusUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList {
+	allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, validatePodSchedulingStatus(&schedulingCtx.Status, field.NewPath("status"))...)
 	return allErrs
 }
 
-func validatePodSchedulingStatus(status *resource.PodSchedulingStatus, fldPath *field.Path) field.ErrorList {
+func validatePodSchedulingStatus(status *resource.PodSchedulingContextStatus, fldPath *field.Path) field.ErrorList {
 	return validatePodSchedulingClaims(status.ResourceClaims, fldPath.Child("claims"))
 }
 
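The renamed test file further down in this diff exercises these validators in detail; as a compressed sketch (assuming an in-tree build where the internal validation package is importable), the PotentialNodes size limit can be hit like this:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/resource"
	"k8s.io/kubernetes/pkg/apis/resource/validation"
)

func main() {
	schedCtx := &resource.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: "my-pod", Namespace: "default"},
	}

	// One node more than PodSchedulingNodeListMaxSize should produce a
	// "too long" error for spec.potentialNodes.
	for i := 0; i <= resource.PodSchedulingNodeListMaxSize; i++ {
		schedCtx.Spec.PotentialNodes = append(schedCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
	}

	errs := validation.ValidatePodSchedulingContexts(schedCtx)
	fmt.Println(errs) // expect a TooLong error on spec.potentialNodes
}
```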
@@ -1,338 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func testPodScheduling(name, namespace string, spec resource.PodSchedulingSpec) *resource.PodScheduling {
|
||||
return &resource.PodScheduling{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: spec,
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodScheduling(t *testing.T) {
|
||||
goodName := "foo"
|
||||
goodNS := "ns"
|
||||
goodPodSchedulingSpec := resource.PodSchedulingSpec{}
|
||||
now := metav1.Now()
|
||||
badName := "!@#$%^"
|
||||
badValue := "spaces not allowed"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
scheduling *resource.PodScheduling
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"good-scheduling": {
|
||||
scheduling: testPodScheduling(goodName, goodNS, goodPodSchedulingSpec),
|
||||
},
|
||||
"missing-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
|
||||
scheduling: testPodScheduling("", goodNS, goodPodSchedulingSpec),
|
||||
},
|
||||
"bad-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
scheduling: testPodScheduling(badName, goodNS, goodPodSchedulingSpec),
|
||||
},
|
||||
"missing-namespace": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
|
||||
scheduling: testPodScheduling(goodName, "", goodPodSchedulingSpec),
|
||||
},
|
||||
"generate-name": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.GenerateName = "pvc-"
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"uid": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"resource-version": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.ResourceVersion = "1"
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"generation": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Generation = 100
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"creation-timestamp": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.CreationTimestamp = now
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"deletion-grace-period-seconds": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.DeletionGracePeriodSeconds = pointer.Int64(10)
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"owner-references": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.OwnerReferences = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "pod",
|
||||
Name: "foo",
|
||||
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
|
||||
},
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"finalizers": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Finalizers = []string{
|
||||
"example.com/foo",
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"managed-fields": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.ManagedFields = []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsType: "FieldsV1",
|
||||
Operation: "Apply",
|
||||
APIVersion: "apps/v1",
|
||||
Manager: "foo",
|
||||
},
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"good-labels": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Labels = map[string]string{
|
||||
"apps.kubernetes.io/name": "test",
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"bad-labels": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Labels = map[string]string{
|
||||
"hello-world": badValue,
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"good-annotations": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Annotations = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"bad-annotations": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Annotations = map[string]string{
|
||||
badName: "hello world",
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
errs := ValidatePodScheduling(scenario.scheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodSchedulingUpdate(t *testing.T) {
|
||||
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
|
||||
badName := "!@#$%^"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldScheduling *resource.PodScheduling
|
||||
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
|
||||
},
|
||||
"add-selected-node": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Spec.SelectedNode = "worker1"
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"add-potential-nodes": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
|
||||
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-potential-nodes-too-long": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
|
||||
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-potential-nodes-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, badName)
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldScheduling.ResourceVersion = "1"
|
||||
errs := ValidatePodSchedulingUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
|
||||
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
|
||||
badName := "!@#$%^"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldScheduling *resource.PodScheduling
|
||||
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
|
||||
},
|
||||
"add-claim-status": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
fmt.Sprintf("worker%d", i),
|
||||
)
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-duplicated-claim-status": {
|
||||
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
for i := 0; i < 2; i++ {
|
||||
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
|
||||
)
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-too-long-claim-status": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
fmt.Sprintf("worker%d", i),
|
||||
)
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-node-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
badName,
|
||||
)
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldScheduling.ResourceVersion = "1"
|
||||
errs := ValidatePodSchedulingStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
@@ -0,0 +1,342 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func testPodSchedulingContexts(name, namespace string, spec resource.PodSchedulingContextSpec) *resource.PodSchedulingContext {
|
||||
return &resource.PodSchedulingContext{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: spec,
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodSchedulingContexts(t *testing.T) {
|
||||
goodName := "foo"
|
||||
goodNS := "ns"
|
||||
goodPodSchedulingSpec := resource.PodSchedulingContextSpec{}
|
||||
now := metav1.Now()
|
||||
badName := "!@#$%^"
|
||||
badValue := "spaces not allowed"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
schedulingCtx *resource.PodSchedulingContext
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"good-schedulingCtx": {
|
||||
schedulingCtx: testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec),
|
||||
},
|
||||
"missing-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
|
||||
schedulingCtx: testPodSchedulingContexts("", goodNS, goodPodSchedulingSpec),
|
||||
},
|
||||
"bad-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
schedulingCtx: testPodSchedulingContexts(badName, goodNS, goodPodSchedulingSpec),
|
||||
},
|
||||
"missing-namespace": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
|
||||
schedulingCtx: testPodSchedulingContexts(goodName, "", goodPodSchedulingSpec),
|
||||
},
|
||||
"generate-name": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.GenerateName = "pvc-"
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"uid": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"resource-version": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.ResourceVersion = "1"
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"generation": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.Generation = 100
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"creation-timestamp": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.CreationTimestamp = now
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"deletion-grace-period-seconds": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.DeletionGracePeriodSeconds = pointer.Int64(10)
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"owner-references": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.OwnerReferences = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "pod",
|
||||
Name: "foo",
|
||||
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
|
||||
},
|
||||
}
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"finalizers": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.Finalizers = []string{
|
||||
"example.com/foo",
|
||||
}
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"managed-fields": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.ManagedFields = []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsType: "FieldsV1",
|
||||
Operation: "Apply",
|
||||
APIVersion: "apps/v1",
|
||||
Manager: "foo",
|
||||
},
|
||||
}
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"good-labels": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.Labels = map[string]string{
|
||||
"apps.kubernetes.io/name": "test",
|
||||
}
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"bad-labels": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.Labels = map[string]string{
|
||||
"hello-world": badValue,
|
||||
}
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"good-annotations": {
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.Annotations = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
"bad-annotations": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
|
||||
schedulingCtx: func() *resource.PodSchedulingContext {
|
||||
schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec)
|
||||
schedulingCtx.Annotations = map[string]string{
|
||||
badName: "hello world",
|
||||
}
|
||||
return schedulingCtx
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
errs := ValidatePodSchedulingContexts(scenario.schedulingCtx)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodSchedulingUpdate(t *testing.T) {
|
||||
validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{})
|
||||
badName := "!@#$%^"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldScheduling *resource.PodSchedulingContext
|
||||
update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"add-selected-node": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
schedulingCtx.Spec.SelectedNode = "worker1"
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"add-potential-nodes": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
|
||||
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
|
||||
}
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"invalid-potential-nodes-too-long": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
|
||||
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
|
||||
}
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"invalid-potential-nodes-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, badName)
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldScheduling.ResourceVersion = "1"
|
||||
errs := ValidatePodSchedulingContextUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
|
||||
validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{})
|
||||
badName := "!@#$%^"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldScheduling *resource.PodSchedulingContext
|
||||
update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"add-claim-status": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
|
||||
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
fmt.Sprintf("worker%d", i),
|
||||
)
|
||||
}
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"invalid-duplicated-claim-status": {
|
||||
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
for i := 0; i < 2; i++ {
|
||||
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
|
||||
)
|
||||
}
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"invalid-too-long-claim-status": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
|
||||
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
fmt.Sprintf("worker%d", i),
|
||||
)
|
||||
}
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
"invalid-node-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext {
|
||||
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
badName,
|
||||
)
|
||||
return schedulingCtx
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldScheduling.ResourceVersion = "1"
|
||||
errs := ValidatePodSchedulingContextStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
pkg/apis/resource/zz_generated.deepcopy.go (generated, 38 changes)
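A short sketch of what the generated helpers in this file guarantee, using the published v1alpha2 counterpart of the internal type (assumed to carry equivalent generated DeepCopy methods): the copy shares no mutable state with the original.

```go
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

func main() {
	orig := &resourcev1alpha2.PodSchedulingContext{}
	orig.Spec.PotentialNodes = []string{"worker1"}

	// DeepCopy clones the slice as well, so appending to the copy does not
	// touch the original object's PotentialNodes.
	cp := orig.DeepCopy()
	cp.Spec.PotentialNodes = append(cp.Spec.PotentialNodes, "worker2")

	fmt.Println(len(orig.Spec.PotentialNodes)) // 1: the original slice is unchanged
	fmt.Println(len(cp.Spec.PotentialNodes))   // 2
}
```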
@@ -48,7 +48,7 @@ func (in *AllocationResult) DeepCopy() *AllocationResult {
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodScheduling) DeepCopyInto(out *PodScheduling) {
|
||||
func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
@@ -57,18 +57,18 @@ func (in *PodScheduling) DeepCopyInto(out *PodScheduling) {
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScheduling.
|
||||
func (in *PodScheduling) DeepCopy() *PodScheduling {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext.
|
||||
func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodScheduling)
|
||||
out := new(PodSchedulingContext)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PodScheduling) DeepCopyObject() runtime.Object {
|
||||
func (in *PodSchedulingContext) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
@@ -76,13 +76,13 @@ func (in *PodScheduling) DeepCopyObject() runtime.Object {
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) {
func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodScheduling, len(*in))
*out = make([]PodSchedulingContext, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -90,18 +90,18 @@ func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) {
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingList.
func (in *PodSchedulingList) DeepCopy() *PodSchedulingList {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList.
func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList {
if in == nil {
return nil
}
out := new(PodSchedulingList)
out := new(PodSchedulingContextList)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSchedulingList) DeepCopyObject() runtime.Object {
func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@@ -109,7 +109,7 @@ func (in *PodSchedulingList) DeepCopyObject() runtime.Object {
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) {
func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) {
*out = *in
if in.PotentialNodes != nil {
in, out := &in.PotentialNodes, &out.PotentialNodes
@@ -119,18 +119,18 @@ func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) {
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingSpec.
func (in *PodSchedulingSpec) DeepCopy() *PodSchedulingSpec {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec.
func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec {
if in == nil {
return nil
}
out := new(PodSchedulingSpec)
out := new(PodSchedulingContextSpec)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) {
func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) {
*out = *in
if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims
@@ -142,12 +142,12 @@ func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) {
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingStatus.
func (in *PodSchedulingStatus) DeepCopy() *PodSchedulingStatus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus.
func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus {
if in == nil {
return nil
}
out := new(PodSchedulingStatus)
out := new(PodSchedulingContextStatus)
in.DeepCopyInto(out)
return out
}
|
36 pkg/generated/openapi/zz_generated.openapi.go generated
@@ -825,10 +825,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"k8s.io/api/rbac/v1beta1.RoleRef": schema_k8sio_api_rbac_v1beta1_RoleRef(ref),
"k8s.io/api/rbac/v1beta1.Subject": schema_k8sio_api_rbac_v1beta1_Subject(ref),
"k8s.io/api/resource/v1alpha2.AllocationResult": schema_k8sio_api_resource_v1alpha2_AllocationResult(ref),
"k8s.io/api/resource/v1alpha2.PodScheduling": schema_k8sio_api_resource_v1alpha2_PodScheduling(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingList": schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingSpec": schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingStatus": schema_k8sio_api_resource_v1alpha2_PodSchedulingStatus(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingContext": schema_k8sio_api_resource_v1alpha2_PodSchedulingContext(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingContextList": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextList(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingContextSpec": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextSpec(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingContextStatus": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextStatus(ref),
"k8s.io/api/resource/v1alpha2.ResourceClaim": schema_k8sio_api_resource_v1alpha2_ResourceClaim(ref),
"k8s.io/api/resource/v1alpha2.ResourceClaimConsumerReference": schema_k8sio_api_resource_v1alpha2_ResourceClaimConsumerReference(ref),
"k8s.io/api/resource/v1alpha2.ResourceClaimList": schema_k8sio_api_resource_v1alpha2_ResourceClaimList(ref),
@@ -41156,11 +41156,11 @@ func schema_k8sio_api_resource_v1alpha2_AllocationResult(ref common.ReferenceCal
}
}

func schema_k8sio_api_resource_v1alpha2_PodScheduling(ref common.ReferenceCallback) common.OpenAPIDefinition {
func schema_k8sio_api_resource_v1alpha2_PodSchedulingContext(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
Description: "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
@@ -41188,14 +41188,14 @@ func schema_k8sio_api_resource_v1alpha2_PodScheduling(ref common.ReferenceCallba
SchemaProps: spec.SchemaProps{
Description: "Spec describes where resources for the Pod are needed.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingSpec"),
Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingContextSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "Status describes where resources for the Pod can be allocated.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingStatus"),
Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingContextStatus"),
},
},
},
@@ -41203,15 +41203,15 @@ func schema_k8sio_api_resource_v1alpha2_PodScheduling(ref common.ReferenceCallba
},
},
Dependencies: []string{
"k8s.io/api/resource/v1alpha2.PodSchedulingSpec", "k8s.io/api/resource/v1alpha2.PodSchedulingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
"k8s.io/api/resource/v1alpha2.PodSchedulingContextSpec", "k8s.io/api/resource/v1alpha2.PodSchedulingContextStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}

func schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref common.ReferenceCallback) common.OpenAPIDefinition {
func schema_k8sio_api_resource_v1alpha2_PodSchedulingContextList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodSchedulingList is a collection of Pod scheduling objects.",
Description: "PodSchedulingContextList is a collection of Pod scheduling objects.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
@@ -41237,13 +41237,13 @@ func schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref common.ReferenceCa
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is the list of PodScheduling objects.",
Description: "Items is the list of PodSchedulingContext objects.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.PodScheduling"),
Ref: ref("k8s.io/api/resource/v1alpha2.PodSchedulingContext"),
},
},
},
@@ -41254,15 +41254,15 @@ func schema_k8sio_api_resource_v1alpha2_PodSchedulingList(ref common.ReferenceCa
},
},
Dependencies: []string{
"k8s.io/api/resource/v1alpha2.PodScheduling", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
"k8s.io/api/resource/v1alpha2.PodSchedulingContext", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}

func schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
func schema_k8sio_api_resource_v1alpha2_PodSchedulingContextSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodSchedulingSpec describes where resources for the Pod are needed.",
Description: "PodSchedulingContextSpec describes where resources for the Pod are needed.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"selectedNode": {
@@ -41298,11 +41298,11 @@ func schema_k8sio_api_resource_v1alpha2_PodSchedulingSpec(ref common.ReferenceCa
}
}

func schema_k8sio_api_resource_v1alpha2_PodSchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
func schema_k8sio_api_resource_v1alpha2_PodSchedulingContextStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodSchedulingStatus describes where resources for the Pod can be allocated.",
Description: "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"resourceClaims": {
|
@@ -638,13 +638,13 @@ func AddHandlers(h printers.PrintHandler) {
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplate)
_ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplateList)

podSchedulingColumnDefinitions := []metav1.TableColumnDefinition{
podSchedulingCtxColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "SelectedNode", Type: "string", Description: resourcev1alpha2.PodSchedulingSpec{}.SwaggerDoc()["selectedNode"]},
{Name: "SelectedNode", Type: "string", Description: resourcev1alpha2.PodSchedulingContextSpec{}.SwaggerDoc()["selectedNode"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(podSchedulingColumnDefinitions, printPodScheduling)
_ = h.TableHandler(podSchedulingColumnDefinitions, printPodSchedulingList)
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContext)
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContextList)
}

// Pass ports=nil for all ports.
@@ -2870,7 +2870,7 @@ func printResourceClaimTemplateList(list *resource.ResourceClaimTemplateList, op
return rows, nil
}

func printPodScheduling(obj *resource.PodScheduling, options printers.GenerateOptions) ([]metav1.TableRow, error) {
func printPodSchedulingContext(obj *resource.PodSchedulingContext, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
@@ -2879,10 +2879,10 @@ func printPodScheduling(obj *resource.PodScheduling, options printers.GenerateOp
return []metav1.TableRow{row}, nil
}

func printPodSchedulingList(list *resource.PodSchedulingList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
func printPodSchedulingContextList(list *resource.PodSchedulingContextList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printPodScheduling(&list.Items[i], options)
r, err := printPodSchedulingContext(&list.Items[i], options)
if err != nil {
return nil, err
}
|
@@ -28,54 +28,54 @@ import (
"k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
"k8s.io/kubernetes/pkg/registry/resource/podscheduling"
"k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// REST implements a RESTStorage for PodSchedulings.
// REST implements a RESTStorage for PodSchedulingContext.
type REST struct {
*genericregistry.Store
}

// NewREST returns a RESTStorage object that will work against PodSchedulings.
// NewREST returns a RESTStorage object that will work against PodSchedulingContext.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) {
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &resource.PodScheduling{} },
NewListFunc: func() runtime.Object { return &resource.PodSchedulingList{} },
PredicateFunc: podscheduling.Match,
DefaultQualifiedResource: resource.Resource("podschedulings"),
SingularQualifiedResource: resource.Resource("podscheduling"),
NewFunc: func() runtime.Object { return &resource.PodSchedulingContext{} },
NewListFunc: func() runtime.Object { return &resource.PodSchedulingContextList{} },
PredicateFunc: podschedulingcontext.Match,
DefaultQualifiedResource: resource.Resource("podschedulingcontexts"),
SingularQualifiedResource: resource.Resource("podschedulingcontext"),

CreateStrategy: podscheduling.Strategy,
UpdateStrategy: podscheduling.Strategy,
DeleteStrategy: podscheduling.Strategy,
CreateStrategy: podschedulingcontext.Strategy,
UpdateStrategy: podschedulingcontext.Strategy,
DeleteStrategy: podschedulingcontext.Strategy,
ReturnDeletedObject: true,
ResetFieldsStrategy: podscheduling.Strategy,
ResetFieldsStrategy: podschedulingcontext.Strategy,

TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podscheduling.GetAttrs}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podschedulingcontext.GetAttrs}
if err := store.CompleteWithOptions(options); err != nil {
return nil, nil, err
}

statusStore := *store
statusStore.UpdateStrategy = podscheduling.StatusStrategy
statusStore.ResetFieldsStrategy = podscheduling.StatusStrategy
statusStore.UpdateStrategy = podschedulingcontext.StatusStrategy
statusStore.ResetFieldsStrategy = podschedulingcontext.StatusStrategy

rest := &REST{store}

return rest, &StatusREST{store: &statusStore}, nil
}
// StatusREST implements the REST endpoint for changing the status of a PodScheduling.
// StatusREST implements the REST endpoint for changing the status of a PodSchedulingContext.
type StatusREST struct {
store *genericregistry.Store
}

// New creates a new PodScheduling object.
// New creates a new PodSchedulingContext object.
func (r *StatusREST) New() runtime.Object {
return &resource.PodScheduling{}
return &resource.PodSchedulingContext{}
}

func (r *StatusREST) Destroy() {
|
@@ -41,7 +41,7 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer)
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1,
ResourcePrefix: "podschedulings",
ResourcePrefix: "podschedulingcontexts",
}
podSchedulingStorage, statusStorage, err := NewREST(restOptions)
if err != nil {
@@ -50,18 +50,18 @@ func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer)
return podSchedulingStorage, statusStorage, server
}

func validNewPodScheduling(name, ns string) *resource.PodScheduling {
scheduling := &resource.PodScheduling{
func validNewPodSchedulingContexts(name, ns string) *resource.PodSchedulingContext {
schedulingCtx := &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: resource.PodSchedulingSpec{
Spec: resource.PodSchedulingContextSpec{
SelectedNode: "worker",
},
Status: resource.PodSchedulingStatus{},
Status: resource.PodSchedulingContextStatus{},
}
return scheduling
return schedulingCtx
}

func TestCreate(t *testing.T) {
@@ -69,13 +69,13 @@ func TestCreate(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
scheduling := validNewPodScheduling("foo", metav1.NamespaceDefault)
scheduling.ObjectMeta = metav1.ObjectMeta{}
schedulingCtx := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)
schedulingCtx.ObjectMeta = metav1.ObjectMeta{}
test.TestCreate(
// valid
scheduling,
schedulingCtx,
// invalid
&resource.PodScheduling{
&resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
},
)
@@ -88,10 +88,10 @@ func TestUpdate(t *testing.T) {
test := genericregistrytest.New(t, storage.Store)
test.TestUpdate(
// valid
validNewPodScheduling("foo", metav1.NamespaceDefault),
validNewPodSchedulingContexts("foo", metav1.NamespaceDefault),
// updateFunc
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.PodScheduling)
object := obj.(*resource.PodSchedulingContext)
if object.Labels == nil {
object.Labels = map[string]string{}
}
@@ -106,7 +106,7 @@ func TestDelete(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
test.TestDelete(validNewPodScheduling("foo", metav1.NamespaceDefault))
test.TestDelete(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
}

func TestGet(t *testing.T) {
@@ -114,7 +114,7 @@ func TestGet(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestGet(validNewPodScheduling("foo", metav1.NamespaceDefault))
test.TestGet(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
}

func TestList(t *testing.T) {
@@ -122,7 +122,7 @@ func TestList(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestList(validNewPodScheduling("foo", metav1.NamespaceDefault))
test.TestList(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault))
}

func TestWatch(t *testing.T) {
@@ -131,7 +131,7 @@ func TestWatch(t *testing.T) {
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestWatch(
validNewPodScheduling("foo", metav1.NamespaceDefault),
validNewPodSchedulingContexts("foo", metav1.NamespaceDefault),
// matching labels
[]labels.Set{},
// not matching labels
@@ -156,19 +156,19 @@ func TestUpdateStatus(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()

key, _ := storage.KeyFunc(ctx, "foo")
schedulingStart := validNewPodScheduling("foo", metav1.NamespaceDefault)
schedulingStart := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)
err := storage.Storage.Create(ctx, key, schedulingStart, nil, 0, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

scheduling := schedulingStart.DeepCopy()
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
schedulingCtx := schedulingStart.DeepCopy()
schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
resource.ResourceClaimSchedulingStatus{
Name: "my-claim",
},
)
_, _, err = statusStorage.Update(ctx, scheduling.Name, rest.DefaultUpdatedObjectInfo(scheduling), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
_, _, err = statusStorage.Update(ctx, schedulingCtx.Name, rest.DefaultUpdatedObjectInfo(schedulingCtx), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -176,9 +176,9 @@ func TestUpdateStatus(t *testing.T) {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
schedulingOut := obj.(*resource.PodScheduling)
schedulingOut := obj.(*resource.PodSchedulingContext)
// only compare relevant changes b/c of difference in metadata
if !apiequality.Semantic.DeepEqual(scheduling.Status, schedulingOut.Status) {
t.Errorf("unexpected object: %s", diff.ObjectDiff(scheduling.Status, schedulingOut.Status))
if !apiequality.Semantic.DeepEqual(schedulingCtx.Status, schedulingOut.Status) {
t.Errorf("unexpected object: %s", diff.ObjectDiff(schedulingCtx.Status, schedulingOut.Status))
}
}
|
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package podscheduling
package podschedulingcontext

import (
"context"
@@ -33,7 +33,7 @@ import (
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// podSchedulingStrategy implements behavior for PodScheduling objects
// podSchedulingStrategy implements behavior for PodSchedulingContext objects
type podSchedulingStrategy struct {
runtime.ObjectTyper
names.NameGenerator
@@ -48,7 +48,7 @@ func (podSchedulingStrategy) NamespaceScoped() bool {
}

// GetResetFields returns the set of fields that get reset by the strategy and
// should not be modified by the user. For a new PodScheduling that is the
// should not be modified by the user. For a new PodSchedulingContext that is the
// status.
func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
fields := map[fieldpath.APIVersion]*fieldpath.Set{
@@ -61,14 +61,14 @@ func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpat
}

func (podSchedulingStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
scheduling := obj.(*resource.PodScheduling)
scheduling := obj.(*resource.PodSchedulingContext)
// Status must not be set by user on create.
scheduling.Status = resource.PodSchedulingStatus{}
scheduling.Status = resource.PodSchedulingContextStatus{}
}

func (podSchedulingStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
scheduling := obj.(*resource.PodScheduling)
return validation.ValidatePodScheduling(scheduling)
scheduling := obj.(*resource.PodSchedulingContext)
return validation.ValidatePodSchedulingContexts(scheduling)
}

func (podSchedulingStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
@@ -83,16 +83,16 @@ func (podSchedulingStrategy) AllowCreateOnUpdate() bool {
}

func (podSchedulingStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
newScheduling := obj.(*resource.PodScheduling)
oldScheduling := old.(*resource.PodScheduling)
newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodSchedulingContext)
newScheduling.Status = oldScheduling.Status
}

func (podSchedulingStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
newScheduling := obj.(*resource.PodScheduling)
oldScheduling := old.(*resource.PodScheduling)
errorList := validation.ValidatePodScheduling(newScheduling)
return append(errorList, validation.ValidatePodSchedulingUpdate(newScheduling, oldScheduling)...)
newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodSchedulingContext)
errorList := validation.ValidatePodSchedulingContexts(newScheduling)
return append(errorList, validation.ValidatePodSchedulingContextUpdate(newScheduling, oldScheduling)...)
}

func (podSchedulingStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
@@ -122,15 +122,15 @@ func (podSchedulingStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fi
}

func (podSchedulingStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
newScheduling := obj.(*resource.PodScheduling)
oldScheduling := old.(*resource.PodScheduling)
newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodSchedulingContext)
newScheduling.Spec = oldScheduling.Spec
}

func (podSchedulingStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
newScheduling := obj.(*resource.PodScheduling)
oldScheduling := old.(*resource.PodScheduling)
return validation.ValidatePodSchedulingStatusUpdate(newScheduling, oldScheduling)
newScheduling := obj.(*resource.PodSchedulingContext)
oldScheduling := old.(*resource.PodSchedulingContext)
return validation.ValidatePodSchedulingContextStatusUpdate(newScheduling, oldScheduling)
}

// WarningsOnUpdate returns warnings for the given update.
@@ -149,15 +149,15 @@ func Match(label labels.Selector, field fields.Selector) storage.SelectionPredic
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
scheduling, ok := obj.(*resource.PodScheduling)
scheduling, ok := obj.(*resource.PodSchedulingContext)
if !ok {
return nil, nil, errors.New("not a PodScheduling")
return nil, nil, errors.New("not a PodSchedulingContext")
}
return labels.Set(scheduling.Labels), toSelectableFields(scheduling), nil
}

// toSelectableFields returns a field set that represents the object
func toSelectableFields(scheduling *resource.PodScheduling) fields.Set {
func toSelectableFields(scheduling *resource.PodSchedulingContext) fields.Set {
fields := generic.ObjectMetaFieldsSet(&scheduling.ObjectMeta, true)
return fields
}
|
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package podscheduling
package podschedulingcontext

import (
"testing"
@@ -24,31 +24,31 @@ import (
"k8s.io/kubernetes/pkg/apis/resource"
)

var podScheduling = &resource.PodScheduling{
var schedulingCtx = &resource.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: "valid-pod",
Namespace: "default",
},
Spec: resource.PodSchedulingSpec{
Spec: resource.PodSchedulingContextSpec{
SelectedNode: "worker",
},
}

func TestPodSchedulingStrategy(t *testing.T) {
if !Strategy.NamespaceScoped() {
t.Errorf("PodScheduling must be namespace scoped")
t.Errorf("PodSchedulingContext must be namespace scoped")
}
if Strategy.AllowCreateOnUpdate() {
t.Errorf("PodScheduling should not allow create on update")
t.Errorf("PodSchedulingContext should not allow create on update")
}
}

func TestPodSchedulingStrategyCreate(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
podScheduling := podScheduling.DeepCopy()
schedulingCtx := schedulingCtx.DeepCopy()

Strategy.PrepareForCreate(ctx, podScheduling)
errs := Strategy.Validate(ctx, podScheduling)
Strategy.PrepareForCreate(ctx, schedulingCtx)
errs := Strategy.Validate(ctx, schedulingCtx)
if len(errs) != 0 {
t.Errorf("unexpected error validating for create %v", errs)
}
@@ -57,12 +57,12 @@ func TestPodSchedulingStrategyCreate(t *testing.T) {
func TestPodSchedulingStrategyUpdate(t *testing.T) {
t.Run("no-changes-okay", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
podScheduling := podScheduling.DeepCopy()
newPodScheduling := podScheduling.DeepCopy()
newPodScheduling.ResourceVersion = "4"
schedulingCtx := schedulingCtx.DeepCopy()
newSchedulingCtx := schedulingCtx.DeepCopy()
newSchedulingCtx.ResourceVersion = "4"

Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling)
errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling)
Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx)
errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx)
if len(errs) != 0 {
t.Errorf("unexpected validation errors: %v", errs)
}
@@ -70,13 +70,13 @@ func TestPodSchedulingStrategyUpdate(t *testing.T) {

t.Run("name-change-not-allowed", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
podScheduling := podScheduling.DeepCopy()
newPodScheduling := podScheduling.DeepCopy()
newPodScheduling.Name = "valid-claim-2"
newPodScheduling.ResourceVersion = "4"
schedulingCtx := schedulingCtx.DeepCopy()
newSchedulingCtx := schedulingCtx.DeepCopy()
newSchedulingCtx.Name = "valid-claim-2"
newSchedulingCtx.ResourceVersion = "4"

Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling)
errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling)
Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx)
errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx)
if len(errs) == 0 {
t.Errorf("expected a validation error")
}
|
@@ -24,7 +24,7 @@ import (
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
podschedulingstore "k8s.io/kubernetes/pkg/registry/resource/podscheduling/storage"
podschedulingcontextsstore "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext/storage"
resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage"
resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage"
resourceclassstore "k8s.io/kubernetes/pkg/registry/resource/resourceclass/storage"
@@ -74,8 +74,8 @@ func (p RESTStorageProvider) v1alpha2Storage(apiResourceConfigSource serverstora
storage[resource] = resourceClaimTemplateStorage
}

if resource := "podschedulings"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingstore.NewREST(restOptionsGetter)
if resource := "podschedulingcontexts"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingcontextsstore.NewREST(restOptionsGetter)
if err != nil {
return nil, err
}
|
@@ -378,10 +378,10 @@ func addAllEventHandlers(
informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PersistentVolumeClaim, "Pvc"),
)
case framework.PodScheduling:
case framework.PodSchedulingContext:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
_, _ = informerFactory.Resource().V1alpha2().PodSchedulings().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PodScheduling, "PodScheduling"),
_, _ = informerFactory.Resource().V1alpha2().PodSchedulingContexts().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PodSchedulingContext, "PodSchedulingContext"),
)
}
case framework.ResourceClaim:
|
@@ -74,14 +74,14 @@ type stateData struct {
// protected by the mutex. Used by PostFilter.
unavailableClaims sets.Int

// A pointer to the PodScheduling object for the pod, if one exists.
// A pointer to the PodSchedulingContext object for the pod, if one exists.
// Gets set on demand.
//
// Conceptually, this object belongs into the scheduler framework
// where it might get shared by different plugins. But in practice,
// it is currently only used by dynamic provisioning and thus
// managed entirely here.
podScheduling *resourcev1alpha2.PodScheduling
schedulingCtx *resourcev1alpha2.PodSchedulingContext

// podSchedulingDirty is true if the current copy was locally modified.
podSchedulingDirty bool
@@ -112,23 +112,23 @@ func (d *stateData) updateClaimStatus(ctx context.Context, clientset kubernetes.
return nil
}

// initializePodScheduling can be called concurrently. It returns an existing PodScheduling
// initializePodSchedulingContext can be called concurrently. It returns an existing PodSchedulingContext
// object if there is one already, retrieves one if not, or as a last resort creates
// one from scratch.
func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, podSchedulingLister resourcev1alpha2listers.PodSchedulingLister) (*resourcev1alpha2.PodScheduling, error) {
// TODO (#113701): check if this mutex locking can be avoided by calling initializePodScheduling during PreFilter.
func (d *stateData) initializePodSchedulingContexts(ctx context.Context, pod *v1.Pod, podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister) (*resourcev1alpha2.PodSchedulingContext, error) {
// TODO (#113701): check if this mutex locking can be avoided by calling initializePodSchedulingContext during PreFilter.
d.mutex.Lock()
defer d.mutex.Unlock()

if d.podScheduling != nil {
return d.podScheduling, nil
if d.schedulingCtx != nil {
return d.schedulingCtx, nil
}

podScheduling, err := podSchedulingLister.PodSchedulings(pod.Namespace).Get(pod.Name)
schedulingCtx, err := podSchedulingContextLister.PodSchedulingContexts(pod.Namespace).Get(pod.Name)
switch {
case apierrors.IsNotFound(err):
controller := true
podScheduling = &resourcev1alpha2.PodScheduling{
schedulingCtx = &resourcev1alpha2.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
@@ -148,56 +148,56 @@ func (d *stateData) initializePodScheduling(ctx context.Context, pod *v1.Pod, po
return nil, err
default:
// We have an object, but it might be obsolete.
if !metav1.IsControlledBy(podScheduling, pod) {
return nil, fmt.Errorf("PodScheduling object with UID %s is not owned by Pod %s/%s", podScheduling.UID, pod.Namespace, pod.Name)
if !metav1.IsControlledBy(schedulingCtx, pod) {
return nil, fmt.Errorf("PodSchedulingContext object with UID %s is not owned by Pod %s/%s", schedulingCtx.UID, pod.Namespace, pod.Name)
}
}
d.podScheduling = podScheduling
return podScheduling, err
d.schedulingCtx = schedulingCtx
return schedulingCtx, err
}

// publishPodScheduling creates or updates the PodScheduling object.
func (d *stateData) publishPodScheduling(ctx context.Context, clientset kubernetes.Interface, podScheduling *resourcev1alpha2.PodScheduling) error {
// publishPodSchedulingContext creates or updates the PodSchedulingContext object.
func (d *stateData) publishPodSchedulingContexts(ctx context.Context, clientset kubernetes.Interface, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
d.mutex.Lock()
defer d.mutex.Unlock()

var err error
logger := klog.FromContext(ctx)
msg := "Updating PodScheduling"
if podScheduling.UID == "" {
msg = "Creating PodScheduling"
msg := "Updating PodSchedulingContext"
if schedulingCtx.UID == "" {
msg = "Creating PodSchedulingContext"
}
if loggerV := logger.V(6); loggerV.Enabled() {
// At a high enough log level, dump the entire object.
loggerV.Info(msg, "podschedulingDump", podScheduling)
loggerV.Info(msg, "podSchedulingCtxDump", schedulingCtx)
} else {
logger.V(5).Info(msg, "podscheduling", klog.KObj(podScheduling))
logger.V(5).Info(msg, "podSchedulingCtx", klog.KObj(schedulingCtx))
}
if podScheduling.UID == "" {
podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Create(ctx, podScheduling, metav1.CreateOptions{})
if schedulingCtx.UID == "" {
schedulingCtx, err = clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).Create(ctx, schedulingCtx, metav1.CreateOptions{})
} else {
// TODO (#113700): patch here to avoid racing with drivers which update the status.
podScheduling, err = clientset.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).Update(ctx, podScheduling, metav1.UpdateOptions{})
schedulingCtx, err = clientset.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).Update(ctx, schedulingCtx, metav1.UpdateOptions{})
}
if err != nil {
return err
}
d.podScheduling = podScheduling
d.schedulingCtx = schedulingCtx
d.podSchedulingDirty = false
return nil
}

// storePodScheduling replaces the pod scheduling object in the state.
func (d *stateData) storePodScheduling(podScheduling *resourcev1alpha2.PodScheduling) {
// storePodSchedulingContext replaces the pod schedulingCtx object in the state.
func (d *stateData) storePodSchedulingContexts(schedulingCtx *resourcev1alpha2.PodSchedulingContext) {
d.mutex.Lock()
defer d.mutex.Unlock()

d.podScheduling = podScheduling
d.schedulingCtx = schedulingCtx
d.podSchedulingDirty = true
}

func statusForClaim(podScheduling *resourcev1alpha2.PodScheduling, podClaimName string) *resourcev1alpha2.ResourceClaimSchedulingStatus {
for _, status := range podScheduling.Status.ResourceClaims {
func statusForClaim(schedulingCtx *resourcev1alpha2.PodSchedulingContext, podClaimName string) *resourcev1alpha2.ResourceClaimSchedulingStatus {
for _, status := range schedulingCtx.Status.ResourceClaims {
if status.Name == podClaimName {
return &status
}
@@ -207,11 +207,11 @@ func statusForClaim(podScheduling *resourcev1alpha2.PodScheduling, podClaimName
// dynamicResources is a plugin that ensures that ResourceClaims are allocated.
type dynamicResources struct {
enabled bool
clientset kubernetes.Interface
claimLister resourcev1alpha2listers.ResourceClaimLister
classLister resourcev1alpha2listers.ResourceClassLister
podSchedulingLister resourcev1alpha2listers.PodSchedulingLister
enabled bool
clientset kubernetes.Interface
claimLister resourcev1alpha2listers.ResourceClaimLister
classLister resourcev1alpha2listers.ResourceClassLister
podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister
}

// New initializes a new plugin and returns it.
@@ -222,11 +222,11 @@ func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram
}

return &dynamicResources{
enabled: true,
clientset: fh.ClientSet(),
claimLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Lister(),
classLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClasses().Lister(),
podSchedulingLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulings().Lister(),
enabled: true,
clientset: fh.ClientSet(),
claimLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Lister(),
classLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClasses().Lister(),
podSchedulingContextLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulingContexts().Lister(),
}, nil
}
@@ -257,7 +257,7 @@ func (pl *dynamicResources) EventsToRegister() []framework.ClusterEvent {
// may be schedulable.
// TODO (#113702): can we change this so that such an event does not trigger *all* pods?
// Yes: https://github.com/kubernetes/kubernetes/blob/abcbaed0784baf5ed2382aae9705a8918f2daa18/pkg/scheduler/eventhandlers.go#L70
{Resource: framework.PodScheduling, ActionType: framework.Add | framework.Update},
{Resource: framework.PodSchedulingContext, ActionType: framework.Add | framework.Update},
// A resource might depend on node labels for topology filtering.
// A new or updated node may make pods schedulable.
{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel},
@@ -436,11 +436,11 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState
}

// Now we need information from drivers.
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister)
schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil {
return statusError(logger, err)
}
status := statusForClaim(podScheduling, pod.Spec.ResourceClaims[index].Name)
status := statusForClaim(schedulingCtx, pod.Spec.ResourceClaims[index].Name)
if status != nil {
for _, unsuitableNode := range status.UnsuitableNodes {
if node.Name == unsuitableNode {
@@ -530,7 +530,7 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
}

logger := klog.FromContext(ctx)
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister)
schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil {
return statusError(logger, err)
}
@@ -540,22 +540,22 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
pending = true
}
}
if pending && !haveAllNodes(podScheduling.Spec.PotentialNodes, nodes) {
if pending && !haveAllNodes(schedulingCtx.Spec.PotentialNodes, nodes) {
// Remember the potential nodes. The object will get created or
// updated in Reserve. This is both an optimization and
// covers the case that PreScore doesn't get called when there
// is only a single node.
logger.V(5).Info("remembering potential nodes", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes))
podScheduling = podScheduling.DeepCopy()
schedulingCtx = schedulingCtx.DeepCopy()
numNodes := len(nodes)
if numNodes > resourcev1alpha2.PodSchedulingNodeListMaxSize {
numNodes = resourcev1alpha2.PodSchedulingNodeListMaxSize
}
podScheduling.Spec.PotentialNodes = make([]string, 0, numNodes)
schedulingCtx.Spec.PotentialNodes = make([]string, 0, numNodes)
if numNodes == len(nodes) {
// Copy all node names.
for _, node := range nodes {
podScheduling.Spec.PotentialNodes = append(podScheduling.Spec.PotentialNodes, node.Name)
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, node.Name)
}
} else {
// Select a random subset of the nodes to comply with
@@ -567,14 +567,14 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
nodeNames[node.Name] = struct{}{}
}
for nodeName := range nodeNames {
if len(podScheduling.Spec.PotentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize {
if len(schedulingCtx.Spec.PotentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize {
break
}
podScheduling.Spec.PotentialNodes = append(podScheduling.Spec.PotentialNodes, nodeName)
schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, nodeName)
}
}
sort.Strings(podScheduling.Spec.PotentialNodes)
state.storePodScheduling(podScheduling)
sort.Strings(schedulingCtx.Spec.PotentialNodes)
state.storePodSchedulingContexts(schedulingCtx)
}
logger.V(5).Info("all potential nodes already set", "pod", klog.KObj(pod), "potentialnodes", nodes)
return nil
@@ -614,7 +614,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
numDelayedAllocationPending := 0
numClaimsWithStatusInfo := 0
logger := klog.FromContext(ctx)
podScheduling, err := state.initializePodScheduling(ctx, pod, pl.podSchedulingLister)
schedulingCtx, err := state.initializePodSchedulingContexts(ctx, pod, pl.podSchedulingContextLister)
if err != nil {
return statusError(logger, err)
}
@@ -639,7 +639,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
return statusError(logger, err)
}
// If we get here, we know that reserving the claim for
// the pod worked and we can proceed with scheduling
// the pod worked and we can proceed with schedulingCtx
// it.
} else {
// Must be delayed allocation.
@@ -647,7 +647,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat

// Did the driver provide information that steered node
// selection towards a node that it can support?
if statusForClaim(podScheduling, pod.Spec.ResourceClaims[index].Name) != nil {
if statusForClaim(schedulingCtx, pod.Spec.ResourceClaims[index].Name) != nil {
numClaimsWithStatusInfo++
}
}
@@ -659,13 +659,13 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
}

podSchedulingDirty := state.podSchedulingDirty
if len(podScheduling.Spec.PotentialNodes) == 0 {
if len(schedulingCtx.Spec.PotentialNodes) == 0 {
// PreScore was not called, probably because there was
// only one candidate. We need to ask whether that
// node is suitable, otherwise the scheduler will pick
// it forever even when it cannot satisfy the claim.
podScheduling = podScheduling.DeepCopy()
podScheduling.Spec.PotentialNodes = []string{nodeName}
schedulingCtx = schedulingCtx.DeepCopy()
schedulingCtx.Spec.PotentialNodes = []string{nodeName}
logger.V(5).Info("asking for information about single potential node", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
podSchedulingDirty = true
}
@@ -675,16 +675,16 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
// the driver yet. Otherwise we wait for information before blindly
// making a decision that might have to be reversed later.
if numDelayedAllocationPending == 1 || numClaimsWithStatusInfo == numDelayedAllocationPending {
podScheduling = podScheduling.DeepCopy()
schedulingCtx = schedulingCtx.DeepCopy()
// TODO: can we increase the chance that the scheduler picks
// the same node as before when allocation is on-going,
// assuming that that node still fits the pod? Picking a
// different node may lead to some claims being allocated for
// one node and others for another, which then would have to be
// resolved with deallocation.
podScheduling.Spec.SelectedNode = nodeName
schedulingCtx.Spec.SelectedNode = nodeName
logger.V(5).Info("start allocation", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
if err := state.publishPodScheduling(ctx, pl.clientset, podScheduling); err != nil {
if err := state.publishPodSchedulingContexts(ctx, pl.clientset, schedulingCtx); err != nil {
return statusError(logger, err)
}
return statusUnschedulable(logger, "waiting for resource driver to allocate resource", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
@@ -692,14 +692,14 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat

// May have been modified earlier in PreScore or above.
if podSchedulingDirty {
if err := state.publishPodScheduling(ctx, pl.clientset, podScheduling); err != nil {
if err := state.publishPodSchedulingContexts(ctx, pl.clientset, schedulingCtx); err != nil {
return statusError(logger, err)
}
}

// More than one pending claim and not enough information about all of them.
//
// TODO: can or should we ensure that scheduling gets aborted while
// TODO: can or should we ensure that schedulingCtx gets aborted while
// waiting for resources *before* triggering delayed volume
// provisioning? On the one hand, volume provisioning is currently
// irreversible, so it better should come last. On the other hand,
@@ -737,7 +737,7 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
claim.Status.ReservedFor = reservedFor
logger.V(5).Info("unreserve", "resourceclaim", klog.KObj(claim))
if err := state.updateClaimStatus(ctx, pl.clientset, index, claim); err != nil {
// We will get here again when pod scheduling
// We will get here again when pod schedulingCtx
// is retried.
logger.Error(err, "unreserve", "resourceclaim", klog.KObj(claim))
}
@@ -746,7 +746,7 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
}

// PostBind is called after a pod is successfully bound to a node. Now we are
// sure that a PodScheduling object, if it exists, is definitely not going to
// sure that a PodSchedulingContext object, if it exists, is definitely not going to
// be needed anymore and can delete it. This is a one-shot thing, there won't
// be any retries. This is okay because it should usually work and in those
// cases where it doesn't, the garbage collector will eventually clean up.
@@ -762,19 +762,19 @@ func (pl *dynamicResources) PostBind(ctx context.Context, cs *framework.CycleSta
return
}

// We cannot know for sure whether the PodScheduling object exists. We
// might have created it in the previous pod scheduling cycle and not
// We cannot know for sure whether the PodSchedulingContext object exists. We
// might have created it in the previous pod schedulingCtx cycle and not
// have it in our informer cache yet. Let's try to delete, just to be
// on the safe side.
logger := klog.FromContext(ctx)
err = pl.clientset.ResourceV1alpha2().PodSchedulings(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
err = pl.clientset.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
switch {
case apierrors.IsNotFound(err):
logger.V(5).Info("no PodScheduling object to delete")
logger.V(5).Info("no PodSchedulingContext object to delete")
case err != nil:
logger.Error(err, "delete PodScheduling")
logger.Error(err, "delete PodSchedulingContext")
default:
logger.V(5).Info("PodScheduling object deleted")
logger.V(5).Info("PodSchedulingContext object deleted")
}
}
|
@@ -125,16 +125,16 @@ var (
ResourceClassName(className).
Obj()

scheduling = st.MakePodScheduling().Name(podName).Namespace(namespace).
scheduling = st.MakePodSchedulingContexts().Name(podName).Namespace(namespace).
OwnerReference(podName, podUID, podKind).
Obj()
schedulingPotential = st.FromPodScheduling(scheduling).
schedulingPotential = st.FromPodSchedulingContexts(scheduling).
PotentialNodes(workerNode.Name).
Obj()
schedulingSelectedPotential = st.FromPodScheduling(schedulingPotential).
schedulingSelectedPotential = st.FromPodSchedulingContexts(schedulingPotential).
SelectedNode(workerNode.Name).
Obj()
schedulingInfo = st.FromPodScheduling(schedulingPotential).
schedulingInfo = st.FromPodSchedulingContexts(schedulingPotential).
ResourceClaims(resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName},
resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName2}).
Obj()
@@ -160,7 +160,7 @@ type result struct {
// functions will get called for all objects of that type. If they needs to
// make changes only to a particular instance, then it must check the name.
type change struct {
scheduling func(*resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling
scheduling func(*resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext
claim func(*resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim
}
type perNodeResult map[string]result
@@ -203,7 +203,7 @@ func TestPlugin(t *testing.T) {
|
||||
pod *v1.Pod
|
||||
claims []*resourcev1alpha2.ResourceClaim
|
||||
classes []*resourcev1alpha2.ResourceClass
|
||||
schedulings []*resourcev1alpha2.PodScheduling
|
||||
schedulings []*resourcev1alpha2.PodSchedulingContext
|
||||
|
||||
prepare prepare
|
||||
want want
|
||||
@@ -269,7 +269,7 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-select-immediately": {
// Create the PodScheduling object, ask for information
// Create the PodSchedulingContext object, ask for information
// and select a node.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
@@ -282,7 +282,7 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-ask": {
// Create the PodScheduling object, ask for
// Create the PodSchedulingContext object, ask for
// information, but do not select a node because
// there are multiple claims.
pod: podWithTwoClaimNames,
@@ -296,18 +296,18 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-finish": {
// Use the populated PodScheduling object to select a
// Use the populated PodSchedulingContext object to select a
// node.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo},
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
reserve: result{
status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `waiting for resource driver to allocate resource`),
changes: change{
scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
return st.FromPodScheduling(in).
scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
return st.FromPodSchedulingContexts(in).
SelectedNode(workerNode.Name).
Obj()
},
@@ -316,19 +316,19 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-finish-concurrent-label-update": {
// Use the populated PodScheduling object to select a
// Use the populated PodSchedulingContext object to select a
// node.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo},
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
prepare: prepare{
reserve: change{
scheduling: func(in *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
// This does not actually conflict with setting the
// selected node, but because the plugin is not using
// patching yet, Update nonetheless fails.
return st.FromPodScheduling(in).
return st.FromPodSchedulingContexts(in).
Label("hello", "world").
Obj()
},
@@ -341,10 +341,10 @@ func TestPlugin(t *testing.T) {
},
},
"delayed-allocation-scheduling-completed": {
// Remove PodScheduling object once the pod is scheduled.
// Remove PodSchedulingContext object once the pod is scheduled.
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim},
schedulings: []*resourcev1alpha2.PodScheduling{schedulingInfo},
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
reserve: result{
@@ -366,7 +366,7 @@ func TestPlugin(t *testing.T) {
pod: otherPodWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{inUseClaim},
classes: []*resourcev1alpha2.ResourceClass{},
schedulings: []*resourcev1alpha2.PodScheduling{},
schedulings: []*resourcev1alpha2.PodSchedulingContext{},
prepare: prepare{},
want: want{
prefilter: result{
@@ -591,7 +591,7 @@ func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) {
for _, claim := range claims.Items {
objects = append(objects, &claim)
}
schedulings, err := tc.client.ResourceV1alpha2().PodSchedulings("").List(tc.ctx, metav1.ListOptions{})
schedulings, err := tc.client.ResourceV1alpha2().PodSchedulingContexts("").List(tc.ctx, metav1.ListOptions{})
require.NoError(t, err, "list pod scheduling")
for _, scheduling := range schedulings.Items {
objects = append(objects, &scheduling)
@@ -615,8 +615,8 @@ func (tc *testContext) updateAPIServer(t *testing.T, objects []metav1.Object, up
t.Fatalf("unexpected error during prepare update: %v", err)
}
modified[i] = obj
case *resourcev1alpha2.PodScheduling:
obj, err := tc.client.ResourceV1alpha2().PodSchedulings(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
case *resourcev1alpha2.PodSchedulingContext:
obj, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error during prepare update: %v", err)
}
@@ -650,7 +650,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje
if updates.claim != nil {
obj = updates.claim(in)
}
case *resourcev1alpha2.PodScheduling:
case *resourcev1alpha2.PodSchedulingContext:
if updates.scheduling != nil {
obj = updates.scheduling(in)
}
@@ -661,7 +661,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje
return updated
}

func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodScheduling) (result *testContext) {
func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodSchedulingContext) (result *testContext) {
t.Helper()

tc := &testContext{}
@@ -702,7 +702,7 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceCl
require.NoError(t, err, "create resource class")
}
for _, scheduling := range schedulings {
_, err := tc.client.ResourceV1alpha2().PodSchedulings(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{})
_, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{})
require.NoError(t, err, "create pod scheduling")
}

@@ -69,7 +69,7 @@ const (
Node GVK = "Node"
PersistentVolume GVK = "PersistentVolume"
PersistentVolumeClaim GVK = "PersistentVolumeClaim"
PodScheduling GVK = "PodScheduling"
PodSchedulingContext GVK = "PodSchedulingContext"
ResourceClaim GVK = "ResourceClaim"
StorageClass GVK = "storage.k8s.io/StorageClass"
CSINode GVK = "storage.k8s.io/CSINode"

@@ -925,22 +925,24 @@ func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourcev1alpha2.R
return wrapper
}

// PodSchedulingWrapper wraps a PodScheduling inside.
type PodSchedulingWrapper struct{ resourcev1alpha2.PodScheduling }

// MakePodScheduling creates a PodScheduling wrapper.
func MakePodScheduling() *PodSchedulingWrapper {
return &PodSchedulingWrapper{resourcev1alpha2.PodScheduling{}}
// PodSchedulingWrapper wraps a PodSchedulingContext inside.
type PodSchedulingWrapper struct {
resourcev1alpha2.PodSchedulingContext
}

// FromPodScheduling creates a PodScheduling wrapper from some existing object.
func FromPodScheduling(other *resourcev1alpha2.PodScheduling) *PodSchedulingWrapper {
// MakePodSchedulingContexts creates a PodSchedulingContext wrapper.
func MakePodSchedulingContexts() *PodSchedulingWrapper {
return &PodSchedulingWrapper{resourcev1alpha2.PodSchedulingContext{}}
}

// FromPodSchedulingContexts creates a PodSchedulingContext wrapper from some existing object.
func FromPodSchedulingContexts(other *resourcev1alpha2.PodSchedulingContext) *PodSchedulingWrapper {
return &PodSchedulingWrapper{*other.DeepCopy()}
}

// Obj returns the inner object.
func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha2.PodScheduling {
return &wrapper.PodScheduling
func (wrapper *PodSchedulingWrapper) Obj() *resourcev1alpha2.PodSchedulingContext {
return &wrapper.PodSchedulingContext
}

// Name sets `s` as the name of the inner object.