dra api: rename NodeResourceSlice -> ResourceSlice

While these objects are currently published only by the kubelet for node-local
resources, this could change once network-attached resources are also
supported. Dropping the "Node" prefix keeps such a future extension possible.

The NodeName in ResourceSlice and StructuredResourceHandle then becomes
optional. The kubelet still has to provide one, and it must match the kubelet's
own node name; otherwise the kubelet has no permission to access ResourceSlice
objects.
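To illustrate (a minimal sketch, not part of this commit; the node and driver names are made up), a kubelet-published slice keeps setting NodeName, while a hypothetical network-attached slice may leave it empty:

    import (
        resourceapi "k8s.io/api/resource/v1alpha2"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Published by the kubelet: NodeName must match its own node name.
    localSlice := &resourceapi.ResourceSlice{
        ObjectMeta: metav1.ObjectMeta{GenerateName: "worker-1-gpu.example.com-"},
        NodeName:   "worker-1",
        DriverName: "gpu.example.com",
        NodeResourceModel: resourceapi.NodeResourceModel{
            NamedResources: &resourceapi.NamedResourcesResources{},
        },
    }

    // Hypothetical network-attached resources: NodeName stays empty.
    fabricSlice := &resourceapi.ResourceSlice{
        ObjectMeta: metav1.ObjectMeta{GenerateName: "fabric-gpu.example.com-"},
        DriverName: "gpu.example.com",
        NodeResourceModel: resourceapi.NodeResourceModel{
            NamedResources: &resourceapi.NamedResourcesResources{},
        },
    }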
Patrick Ohly
2024-03-07 10:14:11 +01:00
parent 42ee56f093
commit 0b6a0d686a
60 changed files with 3868 additions and 3859 deletions

View File

@@ -1891,26 +1891,6 @@
{
"freshness": "Current",
"resources": [
{
"resource": "noderesourceslices",
"responseKind": {
"group": "",
"kind": "NodeResourceSlice",
"version": ""
},
"scope": "Cluster",
"singularResource": "noderesourceslice",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
},
{
"resource": "podschedulingcontexts",
"responseKind": {
@@ -2060,6 +2040,26 @@
"update",
"watch"
]
},
{
"resource": "resourceslices",
"responseKind": {
"group": "",
"kind": "ResourceSlice",
"version": ""
},
"scope": "Cluster",
"singularResource": "resourceslice",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
}
],
"version": "v1alpha2"

View File

@@ -3,23 +3,6 @@
"groupVersion": "resource.k8s.io/v1alpha2",
"kind": "APIResourceList",
"resources": [
{
"kind": "NodeResourceSlice",
"name": "noderesourceslices",
"namespaced": false,
"singularName": "noderesourceslice",
"storageVersionHash": "KmjmPdo2jrQ=",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
},
{
"kind": "PodSchedulingContext",
"name": "podschedulingcontexts",
@@ -143,6 +126,23 @@
"update",
"watch"
]
},
{
"kind": "ResourceSlice",
"name": "resourceslices",
"namespaced": false,
"singularName": "resourceslice",
"storageVersionHash": "IECvOcO76kw=",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
}
]
}

File diff suppressed because it is too large

View File

@@ -60,8 +60,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimTemplateList{},
&PodSchedulingContext{},
&PodSchedulingContextList{},
&NodeResourceSlice{},
&NodeResourceSliceList{},
&ResourceSlice{},
&ResourceSliceList{},
&ResourceClaimParameters{},
&ResourceClaimParametersList{},
&ResourceClassParameters{},

View File

@@ -205,7 +205,8 @@ type StructuredResourceHandle struct {
// allocated.
VendorClaimParameters runtime.Object
// NodeName is the name of the node providing the necessary resources.
// NodeName is the name of the node providing the necessary resources
// if the resources are local to a node.
NodeName string
// Results lists all allocated driver resources.
@@ -479,20 +480,22 @@ type ResourceClaimTemplateList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeResourceSlice provides information about available
// ResourceSlice provides information about available
// resources on individual nodes.
type NodeResourceSlice struct {
type ResourceSlice struct {
metav1.TypeMeta
// Standard object metadata
metav1.ObjectMeta
// NodeName identifies the node where the capacity is available.
// A field selector can be used to list only NodeResourceSlice
// NodeName identifies the node which provides the resources
// if they are local to a node.
//
// A field selector can be used to list only ResourceSlice
// objects with a certain node name.
NodeName string
// DriverName identifies the DRA driver providing the capacity information.
// A field selector can be used to list only NodeResourceSlice
// A field selector can be used to list only ResourceSlice
// objects with a certain driver name.
DriverName string
@@ -507,14 +510,14 @@ type NodeResourceModel struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeResourceSliceList is a collection of NodeResourceSlices.
type NodeResourceSliceList struct {
// ResourceSliceList is a collection of ResourceSlices.
type ResourceSliceList struct {
metav1.TypeMeta
// Standard list metadata
metav1.ListMeta
// Items is the list of node resource capacity objects.
Items []NodeResourceSlice
Items []ResourceSlice
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View File

@@ -23,13 +23,13 @@ import (
)
func addConversionFuncs(scheme *runtime.Scheme) error {
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("NodeResourceSlice"),
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ResourceSlice"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "nodeName", "driverName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for %s: %s", SchemeGroupVersion.WithKind("NodeResourceSlice"), label)
return "", "", fmt.Errorf("field label not supported for %s: %s", SchemeGroupVersion.WithKind("ResourceSlice"), label)
}
}); err != nil {
return err
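These registered field labels are what server-side field selectors resolve against. As a usage sketch (assuming a generated clientset; "list" is among the verbs published for resourceslices in the discovery output above):

    slices, err := kubeClient.ResourceV1alpha2().ResourceSlices().List(ctx, metav1.ListOptions{
        // Both labels are accepted by the conversion func registered above.
        FieldSelector: "nodeName=worker-1,driverName=gpu.example.com",
    })
    if err != nil {
        return err
    }
    _ = slices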

View File

@@ -181,26 +181,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha2.NodeResourceSlice)(nil), (*resource.NodeResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_NodeResourceSlice_To_resource_NodeResourceSlice(a.(*v1alpha2.NodeResourceSlice), b.(*resource.NodeResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.NodeResourceSlice)(nil), (*v1alpha2.NodeResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_NodeResourceSlice_To_v1alpha2_NodeResourceSlice(a.(*resource.NodeResourceSlice), b.(*v1alpha2.NodeResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha2.NodeResourceSliceList)(nil), (*resource.NodeResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_NodeResourceSliceList_To_resource_NodeResourceSliceList(a.(*v1alpha2.NodeResourceSliceList), b.(*resource.NodeResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.NodeResourceSliceList)(nil), (*v1alpha2.NodeResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_NodeResourceSliceList_To_v1alpha2_NodeResourceSliceList(a.(*resource.NodeResourceSliceList), b.(*v1alpha2.NodeResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha2.PodSchedulingContext)(nil), (*resource.PodSchedulingContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(a.(*v1alpha2.PodSchedulingContext), b.(*resource.PodSchedulingContext), scope)
}); err != nil {
@@ -461,6 +441,26 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceSlice)(nil), (*resource.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ResourceSlice_To_resource_ResourceSlice(a.(*v1alpha2.ResourceSlice), b.(*resource.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSlice)(nil), (*v1alpha2.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSlice_To_v1alpha2_ResourceSlice(a.(*resource.ResourceSlice), b.(*v1alpha2.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha2.ResourceSliceList)(nil), (*resource.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ResourceSliceList_To_resource_ResourceSliceList(a.(*v1alpha2.ResourceSliceList), b.(*resource.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*resource.ResourceSliceList)(nil), (*v1alpha2.ResourceSliceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_resource_ResourceSliceList_To_v1alpha2_ResourceSliceList(a.(*resource.ResourceSliceList), b.(*v1alpha2.ResourceSliceList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha2.StructuredResourceHandle)(nil), (*resource.StructuredResourceHandle)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_StructuredResourceHandle_To_resource_StructuredResourceHandle(a.(*v1alpha2.StructuredResourceHandle), b.(*resource.StructuredResourceHandle), scope)
}); err != nil {
@@ -846,58 +846,6 @@ func Convert_resource_NodeResourceModel_To_v1alpha2_NodeResourceModel(in *resour
return autoConvert_resource_NodeResourceModel_To_v1alpha2_NodeResourceModel(in, out, s)
}
func autoConvert_v1alpha2_NodeResourceSlice_To_resource_NodeResourceSlice(in *v1alpha2.NodeResourceSlice, out *resource.NodeResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeName = in.NodeName
out.DriverName = in.DriverName
if err := Convert_v1alpha2_NodeResourceModel_To_resource_NodeResourceModel(&in.NodeResourceModel, &out.NodeResourceModel, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_NodeResourceSlice_To_resource_NodeResourceSlice is an autogenerated conversion function.
func Convert_v1alpha2_NodeResourceSlice_To_resource_NodeResourceSlice(in *v1alpha2.NodeResourceSlice, out *resource.NodeResourceSlice, s conversion.Scope) error {
return autoConvert_v1alpha2_NodeResourceSlice_To_resource_NodeResourceSlice(in, out, s)
}
func autoConvert_resource_NodeResourceSlice_To_v1alpha2_NodeResourceSlice(in *resource.NodeResourceSlice, out *v1alpha2.NodeResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeName = in.NodeName
out.DriverName = in.DriverName
if err := Convert_resource_NodeResourceModel_To_v1alpha2_NodeResourceModel(&in.NodeResourceModel, &out.NodeResourceModel, s); err != nil {
return err
}
return nil
}
// Convert_resource_NodeResourceSlice_To_v1alpha2_NodeResourceSlice is an autogenerated conversion function.
func Convert_resource_NodeResourceSlice_To_v1alpha2_NodeResourceSlice(in *resource.NodeResourceSlice, out *v1alpha2.NodeResourceSlice, s conversion.Scope) error {
return autoConvert_resource_NodeResourceSlice_To_v1alpha2_NodeResourceSlice(in, out, s)
}
func autoConvert_v1alpha2_NodeResourceSliceList_To_resource_NodeResourceSliceList(in *v1alpha2.NodeResourceSliceList, out *resource.NodeResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.NodeResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha2_NodeResourceSliceList_To_resource_NodeResourceSliceList is an autogenerated conversion function.
func Convert_v1alpha2_NodeResourceSliceList_To_resource_NodeResourceSliceList(in *v1alpha2.NodeResourceSliceList, out *resource.NodeResourceSliceList, s conversion.Scope) error {
return autoConvert_v1alpha2_NodeResourceSliceList_To_resource_NodeResourceSliceList(in, out, s)
}
func autoConvert_resource_NodeResourceSliceList_To_v1alpha2_NodeResourceSliceList(in *resource.NodeResourceSliceList, out *v1alpha2.NodeResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha2.NodeResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_NodeResourceSliceList_To_v1alpha2_NodeResourceSliceList is an autogenerated conversion function.
func Convert_resource_NodeResourceSliceList_To_v1alpha2_NodeResourceSliceList(in *resource.NodeResourceSliceList, out *v1alpha2.NodeResourceSliceList, s conversion.Scope) error {
return autoConvert_resource_NodeResourceSliceList_To_v1alpha2_NodeResourceSliceList(in, out, s)
}
func autoConvert_v1alpha2_PodSchedulingContext_To_resource_PodSchedulingContext(in *v1alpha2.PodSchedulingContext, out *resource.PodSchedulingContext, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha2_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(&in.Spec, &out.Spec, s); err != nil {
@@ -1668,6 +1616,58 @@ func Convert_resource_ResourceRequestModel_To_v1alpha2_ResourceRequestModel(in *
return autoConvert_resource_ResourceRequestModel_To_v1alpha2_ResourceRequestModel(in, out, s)
}
func autoConvert_v1alpha2_ResourceSlice_To_resource_ResourceSlice(in *v1alpha2.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeName = in.NodeName
out.DriverName = in.DriverName
if err := Convert_v1alpha2_NodeResourceModel_To_resource_NodeResourceModel(&in.NodeResourceModel, &out.NodeResourceModel, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_ResourceSlice_To_resource_ResourceSlice is an autogenerated conversion function.
func Convert_v1alpha2_ResourceSlice_To_resource_ResourceSlice(in *v1alpha2.ResourceSlice, out *resource.ResourceSlice, s conversion.Scope) error {
return autoConvert_v1alpha2_ResourceSlice_To_resource_ResourceSlice(in, out, s)
}
func autoConvert_resource_ResourceSlice_To_v1alpha2_ResourceSlice(in *resource.ResourceSlice, out *v1alpha2.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.NodeName = in.NodeName
out.DriverName = in.DriverName
if err := Convert_resource_NodeResourceModel_To_v1alpha2_NodeResourceModel(&in.NodeResourceModel, &out.NodeResourceModel, s); err != nil {
return err
}
return nil
}
// Convert_resource_ResourceSlice_To_v1alpha2_ResourceSlice is an autogenerated conversion function.
func Convert_resource_ResourceSlice_To_v1alpha2_ResourceSlice(in *resource.ResourceSlice, out *v1alpha2.ResourceSlice, s conversion.Scope) error {
return autoConvert_resource_ResourceSlice_To_v1alpha2_ResourceSlice(in, out, s)
}
func autoConvert_v1alpha2_ResourceSliceList_To_resource_ResourceSliceList(in *v1alpha2.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]resource.ResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_v1alpha2_ResourceSliceList_To_resource_ResourceSliceList is an autogenerated conversion function.
func Convert_v1alpha2_ResourceSliceList_To_resource_ResourceSliceList(in *v1alpha2.ResourceSliceList, out *resource.ResourceSliceList, s conversion.Scope) error {
return autoConvert_v1alpha2_ResourceSliceList_To_resource_ResourceSliceList(in, out, s)
}
func autoConvert_resource_ResourceSliceList_To_v1alpha2_ResourceSliceList(in *resource.ResourceSliceList, out *v1alpha2.ResourceSliceList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha2.ResourceSlice)(unsafe.Pointer(&in.Items))
return nil
}
// Convert_resource_ResourceSliceList_To_v1alpha2_ResourceSliceList is an autogenerated conversion function.
func Convert_resource_ResourceSliceList_To_v1alpha2_ResourceSliceList(in *resource.ResourceSliceList, out *v1alpha2.ResourceSliceList, s conversion.Scope) error {
return autoConvert_resource_ResourceSliceList_To_v1alpha2_ResourceSliceList(in, out, s)
}
func autoConvert_v1alpha2_StructuredResourceHandle_To_resource_StructuredResourceHandle(in *v1alpha2.StructuredResourceHandle, out *resource.StructuredResourceHandle, s conversion.Scope) error {
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.VendorClassParameters, &out.VendorClassParameters, s); err != nil {
return err

View File

@@ -220,7 +220,9 @@ func validateResourceHandles(resourceHandles []resource.ResourceHandle, maxSize
func validateStructuredResourceHandle(handle *resource.StructuredResourceHandle, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
allErrs = append(allErrs, validateNodeName(handle.NodeName, fldPath.Child("nodeName"))...)
if handle.NodeName != "" {
allErrs = append(allErrs, validateNodeName(handle.NodeName, fldPath.Child("nodeName"))...)
}
allErrs = append(allErrs, validateDriverAllocationResults(handle.Results, fldPath.Child("results"))...)
return allErrs
}
@@ -388,12 +390,14 @@ func validateNodeName(name string, fldPath *field.Path) field.ErrorList {
return allErrs
}
// ValidateNodeResourceSlice tests if a NodeResourceSlice object is valid.
func ValidateNodeResourceSlice(nodeResourceSlice *resource.NodeResourceSlice) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&nodeResourceSlice.ObjectMeta, false, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
allErrs = append(allErrs, validateNodeName(nodeResourceSlice.NodeName, field.NewPath("nodeName"))...)
allErrs = append(allErrs, validateResourceDriverName(nodeResourceSlice.DriverName, field.NewPath("driverName"))...)
allErrs = append(allErrs, validateNodeResourceModel(&nodeResourceSlice.NodeResourceModel, nil)...)
// ValidateResourceSlice tests if a ResourceSlice object is valid.
func ValidateResourceSlice(resourceSlice *resource.ResourceSlice) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&resourceSlice.ObjectMeta, false, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
if resourceSlice.NodeName != "" {
allErrs = append(allErrs, validateNodeName(resourceSlice.NodeName, field.NewPath("nodeName"))...)
}
allErrs = append(allErrs, validateResourceDriverName(resourceSlice.DriverName, field.NewPath("driverName"))...)
allErrs = append(allErrs, validateNodeResourceModel(&resourceSlice.NodeResourceModel, nil)...)
return allErrs
}
@@ -415,12 +419,12 @@ func validateNodeResourceModel(model *resource.NodeResourceModel, fldPath *field
return allErrs
}
// ValidateNodeResourceSlice tests if a NodeResourceSlice update is valid.
func ValidateNodeResourceSliceUpdate(nodeResourceSlice, oldNodeResourceSlice *resource.NodeResourceSlice) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&nodeResourceSlice.ObjectMeta, &oldNodeResourceSlice.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateNodeResourceSlice(nodeResourceSlice)...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(nodeResourceSlice.NodeName, oldNodeResourceSlice.NodeName, field.NewPath("nodeName"))...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(nodeResourceSlice.DriverName, oldNodeResourceSlice.DriverName, field.NewPath("driverName"))...)
// ValidateResourceSliceUpdate tests if a ResourceSlice update is valid.
func ValidateResourceSliceUpdate(resourceSlice, oldResourceSlice *resource.ResourceSlice) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceSlice.ObjectMeta, &oldResourceSlice.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateResourceSlice(resourceSlice)...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(resourceSlice.NodeName, oldResourceSlice.NodeName, field.NewPath("nodeName"))...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(resourceSlice.DriverName, oldResourceSlice.DriverName, field.NewPath("driverName"))...)
return allErrs
}
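A hedged sketch of the relaxed rule (values are made up; the model field mirrors the test fixtures below): a slice without a node name now passes validation, while a non-empty name is still checked and, per ValidateResourceSliceUpdate, remains immutable:

    slice := &resource.ResourceSlice{
        ObjectMeta: metav1.ObjectMeta{Name: "network-attached"},
        DriverName: "gpu.example.com",
        NodeResourceModel: resource.NodeResourceModel{
            NamedResources: &resource.NamedResourcesResources{},
        },
        // NodeName left empty on purpose: validateNodeName is skipped.
    }
    errs := ValidateResourceSlice(slice) // expect no nodeName error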

View File

@@ -387,42 +387,8 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandles: []resource.ResourceHandle{
{
DriverName: "valid",
StructuredData: &resource.StructuredResourceHandle{
NodeName: "worker",
},
},
},
}
return claim
},
},
"invalid-add-allocation-structured": {
wantFailures: field.ErrorList{
field.Invalid(field.NewPath("status", "allocation", "resourceHandles").Index(0).Child("structuredData", "nodeName"), "", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
field.Required(field.NewPath("status", "allocation", "resourceHandles").Index(0).Child("structuredData", "results").Index(1), "exactly one structured model field must be set"),
},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandles: []resource.ResourceHandle{
{
DriverName: "valid",
StructuredData: &resource.StructuredResourceHandle{
Results: []resource.DriverAllocationResult{
{
AllocationResultModel: resource.AllocationResultModel{
NamedResources: &resource.NamedResourcesAllocationResult{
Name: "some-resource-instance",
},
},
},
{
AllocationResultModel: resource.AllocationResultModel{}, // invalid
},
},
},
DriverName: "valid",
StructuredData: &resource.StructuredResourceHandle{},
},
},
}
@@ -446,6 +412,39 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
return claim
},
},
"invalid-add-allocation-structured": {
wantFailures: field.ErrorList{
field.Invalid(field.NewPath("status", "allocation", "resourceHandles").Index(0).Child("structuredData", "nodeName"), "&^!", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
field.Required(field.NewPath("status", "allocation", "resourceHandles").Index(0).Child("structuredData", "results").Index(1), "exactly one structured model field must be set"),
},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandles: []resource.ResourceHandle{
{
DriverName: "valid",
StructuredData: &resource.StructuredResourceHandle{
NodeName: "&^!",
Results: []resource.DriverAllocationResult{
{
AllocationResultModel: resource.AllocationResultModel{
NamedResources: &resource.NamedResourcesAllocationResult{
Name: "some-resource-instance",
},
},
},
{
AllocationResultModel: resource.AllocationResultModel{}, // invalid
},
},
},
},
},
}
return claim
},
},
"invalid-duplicated-data": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "allocation", "resourceHandles").Index(0), nil, "data and structuredData are mutually exclusive")},
oldClaim: validClaim,

View File

@@ -27,8 +27,8 @@ import (
"k8s.io/utils/ptr"
)
func testNodeResourceSlice(name, nodeName, driverName string) *resource.NodeResourceSlice {
return &resource.NodeResourceSlice{
func testResourceSlice(name, nodeName, driverName string) *resource.ResourceSlice {
return &resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@@ -40,7 +40,7 @@ func testNodeResourceSlice(name, nodeName, driverName string) *resource.NodeReso
}
}
func TestValidateNodeResourceSlice(t *testing.T) {
func TestValidateResourceSlice(t *testing.T) {
goodName := "foo"
badName := "!@#$%^"
driverName := "test.example.com"
@@ -48,65 +48,65 @@ func TestValidateNodeResourceSlice(t *testing.T) {
badValue := "spaces not allowed"
scenarios := map[string]struct {
slice *resource.NodeResourceSlice
slice *resource.ResourceSlice
wantFailures field.ErrorList
}{
"good": {
slice: testNodeResourceSlice(goodName, goodName, driverName),
slice: testResourceSlice(goodName, goodName, driverName),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
slice: testNodeResourceSlice("", goodName, driverName),
slice: testResourceSlice("", goodName, driverName),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
slice: testNodeResourceSlice(badName, goodName, driverName),
slice: testResourceSlice(badName, goodName, driverName),
},
"generate-name": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.GenerateName = "prefix-"
return slice
}(),
},
"uid": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return slice
}(),
},
"resource-version": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.ResourceVersion = "1"
return slice
}(),
},
"generation": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Generation = 100
return slice
}(),
},
"creation-timestamp": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.CreationTimestamp = now
return slice
}(),
},
"deletion-grace-period-seconds": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.DeletionGracePeriodSeconds = ptr.To[int64](10)
return slice
}(),
},
"owner-references": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
@@ -119,8 +119,8 @@ func TestValidateNodeResourceSlice(t *testing.T) {
}(),
},
"finalizers": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Finalizers = []string{
"example.com/foo",
}
@@ -128,8 +128,8 @@ func TestValidateNodeResourceSlice(t *testing.T) {
}(),
},
"managed-fields": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
@@ -142,8 +142,8 @@ func TestValidateNodeResourceSlice(t *testing.T) {
}(),
},
"good-labels": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
@@ -152,8 +152,8 @@ func TestValidateNodeResourceSlice(t *testing.T) {
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Labels = map[string]string{
"hello-world": badValue,
}
@@ -161,8 +161,8 @@ func TestValidateNodeResourceSlice(t *testing.T) {
}(),
},
"good-annotations": {
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Annotations = map[string]string{
"foo": "bar",
}
@@ -171,8 +171,8 @@ func TestValidateNodeResourceSlice(t *testing.T) {
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Annotations = map[string]string{
badName: "hello world",
}
@@ -181,17 +181,17 @@ func TestValidateNodeResourceSlice(t *testing.T) {
},
"bad-nodename": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("nodeName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
slice: testNodeResourceSlice(goodName, badName, driverName),
slice: testResourceSlice(goodName, badName, driverName),
},
"bad-drivername": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
slice: testNodeResourceSlice(goodName, goodName, badName),
slice: testResourceSlice(goodName, goodName, badName),
},
"empty-model": {
wantFailures: field.ErrorList{field.Required(nil, "exactly one structured model field must be set")},
slice: func() *resource.NodeResourceSlice {
slice := testNodeResourceSlice(goodName, goodName, driverName)
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.NodeResourceModel = resource.NodeResourceModel{}
return slice
}(),
@@ -200,45 +200,45 @@ func TestValidateNodeResourceSlice(t *testing.T) {
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateNodeResourceSlice(scenario.slice)
errs := ValidateResourceSlice(scenario.slice)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidateNodeResourceSliceUpdate(t *testing.T) {
func TestValidateResourceSliceUpdate(t *testing.T) {
name := "valid"
validNodeResourceSlice := testNodeResourceSlice(name, name, name)
validResourceSlice := testResourceSlice(name, name, name)
scenarios := map[string]struct {
oldNodeResourceSlice *resource.NodeResourceSlice
update func(slice *resource.NodeResourceSlice) *resource.NodeResourceSlice
wantFailures field.ErrorList
oldResourceSlice *resource.ResourceSlice
update func(slice *resource.ResourceSlice) *resource.ResourceSlice
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldNodeResourceSlice: validNodeResourceSlice,
update: func(slice *resource.NodeResourceSlice) *resource.NodeResourceSlice { return slice },
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice { return slice },
},
"invalid-name-update": {
oldNodeResourceSlice: validNodeResourceSlice,
update: func(slice *resource.NodeResourceSlice) *resource.NodeResourceSlice {
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice {
slice.Name += "-update"
return slice
},
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), name+"-update", "field is immutable")},
},
"invalid-update-nodename": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("nodeName"), name+"-updated", "field is immutable")},
oldNodeResourceSlice: validNodeResourceSlice,
update: func(slice *resource.NodeResourceSlice) *resource.NodeResourceSlice {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("nodeName"), name+"-updated", "field is immutable")},
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice {
slice.NodeName += "-updated"
return slice
},
},
"invalid-update-drivername": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), name+"-updated", "field is immutable")},
oldNodeResourceSlice: validNodeResourceSlice,
update: func(slice *resource.NodeResourceSlice) *resource.NodeResourceSlice {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), name+"-updated", "field is immutable")},
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice {
slice.DriverName += "-updated"
return slice
},
@@ -247,8 +247,8 @@ func TestValidateNodeResourceSliceUpdate(t *testing.T) {
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldNodeResourceSlice.ResourceVersion = "1"
errs := ValidateNodeResourceSliceUpdate(scenario.update(scenario.oldNodeResourceSlice.DeepCopy()), scenario.oldNodeResourceSlice)
scenario.oldResourceSlice.ResourceVersion = "1"
errs := ValidateResourceSliceUpdate(scenario.update(scenario.oldResourceSlice.DeepCopy()), scenario.oldResourceSlice)
assert.Equal(t, scenario.wantFailures, errs)
})
}

View File

@@ -346,66 +346,6 @@ func (in *NodeResourceModel) DeepCopy() *NodeResourceModel {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceSlice) DeepCopyInto(out *NodeResourceSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.NodeResourceModel.DeepCopyInto(&out.NodeResourceModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceSlice.
func (in *NodeResourceSlice) DeepCopy() *NodeResourceSlice {
if in == nil {
return nil
}
out := new(NodeResourceSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourceSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceSliceList) DeepCopyInto(out *NodeResourceSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NodeResourceSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceSliceList.
func (in *NodeResourceSliceList) DeepCopy() *NodeResourceSliceList {
if in == nil {
return nil
}
out := new(NodeResourceSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourceSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
*out = *in
@@ -1089,6 +1029,66 @@ func (in *ResourceRequestModel) DeepCopy() *ResourceRequestModel {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.NodeResourceModel.DeepCopyInto(&out.NodeResourceModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
func (in *ResourceSlice) DeepCopy() *ResourceSlice {
if in == nil {
return nil
}
out := new(ResourceSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
if in == nil {
return nil
}
out := new(ResourceSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) {
*out = *in

View File

@@ -881,8 +881,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"k8s.io/api/resource/v1alpha2.NamedResourcesResources": schema_k8sio_api_resource_v1alpha2_NamedResourcesResources(ref),
"k8s.io/api/resource/v1alpha2.NamedResourcesStringSlice": schema_k8sio_api_resource_v1alpha2_NamedResourcesStringSlice(ref),
"k8s.io/api/resource/v1alpha2.NodeResourceModel": schema_k8sio_api_resource_v1alpha2_NodeResourceModel(ref),
"k8s.io/api/resource/v1alpha2.NodeResourceSlice": schema_k8sio_api_resource_v1alpha2_NodeResourceSlice(ref),
"k8s.io/api/resource/v1alpha2.NodeResourceSliceList": schema_k8sio_api_resource_v1alpha2_NodeResourceSliceList(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingContext": schema_k8sio_api_resource_v1alpha2_PodSchedulingContext(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingContextList": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextList(ref),
"k8s.io/api/resource/v1alpha2.PodSchedulingContextSpec": schema_k8sio_api_resource_v1alpha2_PodSchedulingContextSpec(ref),
@@ -909,6 +907,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"k8s.io/api/resource/v1alpha2.ResourceHandle": schema_k8sio_api_resource_v1alpha2_ResourceHandle(ref),
"k8s.io/api/resource/v1alpha2.ResourceRequest": schema_k8sio_api_resource_v1alpha2_ResourceRequest(ref),
"k8s.io/api/resource/v1alpha2.ResourceRequestModel": schema_k8sio_api_resource_v1alpha2_ResourceRequestModel(ref),
"k8s.io/api/resource/v1alpha2.ResourceSlice": schema_k8sio_api_resource_v1alpha2_ResourceSlice(ref),
"k8s.io/api/resource/v1alpha2.ResourceSliceList": schema_k8sio_api_resource_v1alpha2_ResourceSliceList(ref),
"k8s.io/api/resource/v1alpha2.StructuredResourceHandle": schema_k8sio_api_resource_v1alpha2_StructuredResourceHandle(ref),
"k8s.io/api/resource/v1alpha2.VendorParameters": schema_k8sio_api_resource_v1alpha2_VendorParameters(ref),
"k8s.io/api/scheduling/v1.PriorityClass": schema_k8sio_api_scheduling_v1_PriorityClass(ref),
@@ -45195,116 +45195,6 @@ func schema_k8sio_api_resource_v1alpha2_NodeResourceModel(ref common.ReferenceCa
}
}
func schema_k8sio_api_resource_v1alpha2_NodeResourceSlice(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeResourceSlice provides information about available resources on individual nodes.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object metadata",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"nodeName": {
SchemaProps: spec.SchemaProps{
Description: "NodeName identifies the node where the capacity is available. A field selector can be used to list only NodeResourceSlice objects with a certain node name.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"driverName": {
SchemaProps: spec.SchemaProps{
Description: "DriverName identifies the DRA driver providing the capacity information. A field selector can be used to list only NodeResourceSlice objects with a certain driver name.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"namedResources": {
SchemaProps: spec.SchemaProps{
Description: "NamedResources describes available resources using the named resources model.",
Ref: ref("k8s.io/api/resource/v1alpha2.NamedResourcesResources"),
},
},
},
Required: []string{"nodeName", "driverName"},
},
},
Dependencies: []string{
"k8s.io/api/resource/v1alpha2.NamedResourcesResources", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_resource_v1alpha2_NodeResourceSliceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "NodeResourceSliceList is a collection of NodeResourceSlices.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is the list of node resource capacity objects.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.NodeResourceSlice"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/resource/v1alpha2.NodeResourceSlice", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_resource_v1alpha2_PodSchedulingContext(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -46470,6 +46360,115 @@ func schema_k8sio_api_resource_v1alpha2_ResourceRequestModel(ref common.Referenc
}
}
func schema_k8sio_api_resource_v1alpha2_ResourceSlice(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceSlice provides information about available resources on individual nodes.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object metadata",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"nodeName": {
SchemaProps: spec.SchemaProps{
Description: "NodeName identifies the node which provides the resources if they are local to a node.\n\nA field selector can be used to list only ResourceSlice objects with a certain node name.",
Type: []string{"string"},
Format: "",
},
},
"driverName": {
SchemaProps: spec.SchemaProps{
Description: "DriverName identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"namedResources": {
SchemaProps: spec.SchemaProps{
Description: "NamedResources describes available resources using the named resources model.",
Ref: ref("k8s.io/api/resource/v1alpha2.NamedResourcesResources"),
},
},
},
Required: []string{"driverName"},
},
},
Dependencies: []string{
"k8s.io/api/resource/v1alpha2.NamedResourcesResources", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_k8sio_api_resource_v1alpha2_ResourceSliceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceSliceList is a collection of ResourceSlices.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "Items is the list of node resource capacity objects.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/resource/v1alpha2.ResourceSlice"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/resource/v1alpha2.ResourceSlice", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
func schema_k8sio_api_resource_v1alpha2_StructuredResourceHandle(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -46491,8 +46490,7 @@ func schema_k8sio_api_resource_v1alpha2_StructuredResourceHandle(ref common.Refe
},
"nodeName": {
SchemaProps: spec.SchemaProps{
Description: "NodeName is the name of the node providing the necessary resources.",
Default: "",
Description: "NodeName is the name of the node providing the necessary resources if the resources are local to a node.",
Type: []string{"string"},
Format: "",
},
@@ -46517,7 +46515,7 @@ func schema_k8sio_api_resource_v1alpha2_StructuredResourceHandle(ref common.Refe
},
},
},
Required: []string{"nodeName", "results"},
Required: []string{"results"},
},
},
Dependencies: []string{

View File

@@ -93,9 +93,9 @@ func (config Config) New(ctx context.Context, serverID string) (authorizer.Autho
// Keep cases in sync with constant list in k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go.
switch configuredAuthorizer.Type {
case authzconfig.AuthorizerType(modes.ModeNode):
var slices resourcev1alpha2informers.NodeResourceSliceInformer
var slices resourcev1alpha2informers.ResourceSliceInformer
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
slices = config.VersionedInformerFactory.Resource().V1alpha2().NodeResourceSlices()
slices = config.VersionedInformerFactory.Resource().V1alpha2().ResourceSlices()
}
node.RegisterMetrics()
graph := node.NewGraph()

View File

@@ -48,7 +48,7 @@ const (
)
// nodeResourcesController collects resource information from all registered
// plugins and synchronizes that information with NodeResourceSlice objects.
// plugins and synchronizes that information with ResourceSlice objects.
type nodeResourcesController struct {
ctx context.Context
kubeClient kubernetes.Interface
@@ -79,11 +79,11 @@ type activePlugin struct {
// startNodeResourcesController constructs a new controller and starts it.
//
// If a kubeClient is provided, then it synchronizes NodeResourceSlices
// If a kubeClient is provided, then it synchronizes ResourceSlices
// with the resource information provided by plugins. Without it,
// the controller is inactive. This can happen when kubelet is run stand-alone
// without an apiserver. In that case we can't and don't need to publish
// NodeResourceSlices.
// ResourceSlices.
func startNodeResourcesController(ctx context.Context, kubeClient kubernetes.Interface, nodeName string) *nodeResourcesController {
if kubeClient == nil {
return nil
@@ -243,49 +243,49 @@ func (c *nodeResourcesController) run(ctx context.Context) {
logger := klog.FromContext(ctx)
// When kubelet starts, we have two choices:
// - Sync immediately, which in practice will delete all NodeResourceSlices
// - Sync immediately, which in practice will delete all ResourceSlices
// because no plugin has registered yet. We could do a DeleteCollection
// to speed this up.
// - Wait a bit, then sync. If all plugins have re-registered in the meantime,
// we might not need to change any NodeResourceSlice.
// we might not need to change any ResourceSlice.
//
// For now syncing starts immediately, with no DeleteCollection. This
// can be reconsidered later.
// While kubelet starts up, there are errors:
// E0226 13:41:19.880621 126334 reflector.go:150] k8s.io/client-go@v0.0.0/tools/cache/reflector.go:232: Failed to watch *v1alpha2.NodeResourceSlice: failed to list *v1alpha2.NodeResourceSlice: noderesourceslices.resource.k8s.io is forbidden: User "system:anonymous" cannot list resource "noderesourceslices" in API group "resource.k8s.io" at the cluster scope
// E0226 13:41:19.880621 126334 reflector.go:150] k8s.io/client-go@v0.0.0/tools/cache/reflector.go:232: Failed to watch *v1alpha2.ResourceSlice: failed to list *v1alpha2.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User "system:anonymous" cannot list resource "resourceslices" in API group "resource.k8s.io" at the cluster scope
//
// The credentials used by kubeClient seem to get swapped out later,
// because eventually these list calls succeed.
// TODO (https://github.com/kubernetes/kubernetes/issues/123691): can we avoid these error log entries? Perhaps wait here?
// We could use an indexer on driver name, but that seems overkill.
informer := resourceinformers.NewFilteredNodeResourceSliceInformer(c.kubeClient, resyncPeriod, nil, func(options *metav1.ListOptions) {
informer := resourceinformers.NewFilteredResourceSliceInformer(c.kubeClient, resyncPeriod, nil, func(options *metav1.ListOptions) {
options.FieldSelector = "nodeName=" + c.nodeName
})
c.sliceStore = informer.GetStore()
handler, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj any) {
slice, ok := obj.(*resourceapi.NodeResourceSlice)
slice, ok := obj.(*resourceapi.ResourceSlice)
if !ok {
return
}
logger.V(5).Info("NodeResourceSlice add", "slice", klog.KObj(slice))
logger.V(5).Info("ResourceSlice add", "slice", klog.KObj(slice))
c.queue.Add(slice.DriverName)
},
UpdateFunc: func(old, new any) {
oldSlice, ok := old.(*resourceapi.NodeResourceSlice)
oldSlice, ok := old.(*resourceapi.ResourceSlice)
if !ok {
return
}
newSlice, ok := new.(*resourceapi.NodeResourceSlice)
newSlice, ok := new.(*resourceapi.ResourceSlice)
if !ok {
return
}
if loggerV := logger.V(6); loggerV.Enabled() {
loggerV.Info("NodeResourceSlice update", "slice", klog.KObj(newSlice), "diff", cmp.Diff(oldSlice, newSlice))
loggerV.Info("ResourceSlice update", "slice", klog.KObj(newSlice), "diff", cmp.Diff(oldSlice, newSlice))
} else {
logger.V(5).Info("NodeResourceSlice update", "slice", klog.KObj(newSlice))
logger.V(5).Info("ResourceSlice update", "slice", klog.KObj(newSlice))
}
c.queue.Add(newSlice.DriverName)
},
@@ -293,16 +293,16 @@ func (c *nodeResourcesController) run(ctx context.Context) {
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = tombstone.Obj
}
slice, ok := obj.(*resourceapi.NodeResourceSlice)
slice, ok := obj.(*resourceapi.ResourceSlice)
if !ok {
return
}
logger.V(5).Info("NodeResourceSlice delete", "slice", klog.KObj(slice))
logger.V(5).Info("ResourceSlice delete", "slice", klog.KObj(slice))
c.queue.Add(slice.DriverName)
},
})
if err != nil {
logger.Error(err, "Registering event handler on the NodeResourceSlice informer failed, disabling resource monitoring")
logger.Error(err, "Registering event handler on the ResourceSlice informer failed, disabling resource monitoring")
return
}
@@ -319,7 +319,7 @@ func (c *nodeResourcesController) run(ctx context.Context) {
return
}
}
logger.Info("NodeResourceSlice informer has synced")
logger.Info("ResourceSlice informer has synced")
for c.processNextWorkItem(ctx) {
}
@@ -378,11 +378,11 @@ func (c *nodeResourcesController) sync(ctx context.Context, driverName string) e
// Slices that don't match any driver resource can either be updated (if there
// are new driver resources that need to be stored) or they need to be deleted.
obsoleteSlices := make([]*resourceapi.NodeResourceSlice, 0, len(slices))
obsoleteSlices := make([]*resourceapi.ResourceSlice, 0, len(slices))
// Match slices with resource information.
for _, obj := range slices {
slice := obj.(*resourceapi.NodeResourceSlice)
slice := obj.(*resourceapi.ResourceSlice)
if slice.DriverName != driverName {
continue
}
@@ -414,7 +414,7 @@ func (c *nodeResourcesController) sync(ctx context.Context, driverName string) e
// where we publish it.
//
// The long-term goal is to move the handling of
// NodeResourceSlice objects into the driver, with kubelet
// ResourceSlice objects into the driver, with kubelet
// just acting as a REST proxy. The advantage of that will
// be that kubelet won't need to support the same
// resource API version as the driver and the control plane.
@@ -435,14 +435,14 @@ func (c *nodeResourcesController) sync(ctx context.Context, driverName string) e
slice = slice.DeepCopy()
slice.NodeResourceModel = *resource
logger.V(5).Info("Reusing existing node resource slice", "slice", klog.KObj(slice))
if _, err := c.kubeClient.ResourceV1alpha2().NodeResourceSlices().Update(ctx, slice, metav1.UpdateOptions{}); err != nil {
if _, err := c.kubeClient.ResourceV1alpha2().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("update node resource slice: %w", err)
}
continue
}
// Create a new slice.
slice := &resourceapi.NodeResourceSlice{
slice := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
GenerateName: c.nodeName + "-" + driverName + "-",
// TODO (https://github.com/kubernetes/kubernetes/issues/123692): node object as owner
@@ -452,7 +452,7 @@ func (c *nodeResourcesController) sync(ctx context.Context, driverName string) e
NodeResourceModel: *resource,
}
logger.V(5).Info("Creating new node resource slice", "slice", klog.KObj(slice))
if _, err := c.kubeClient.ResourceV1alpha2().NodeResourceSlices().Create(ctx, slice, metav1.CreateOptions{}); err != nil {
if _, err := c.kubeClient.ResourceV1alpha2().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("create node resource slice: %w", err)
}
}
@@ -461,7 +461,7 @@ func (c *nodeResourcesController) sync(ctx context.Context, driverName string) e
for i := 0; i < numObsoleteSlices; i++ {
slice := obsoleteSlices[i]
logger.V(5).Info("Deleting obsolete node resource slice", "slice", klog.KObj(slice))
if err := c.kubeClient.ResourceV1alpha2().NodeResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{}); err != nil {
if err := c.kubeClient.ResourceV1alpha2().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("delete node resource slice: %w", err)
}
}

View File

@@ -102,7 +102,7 @@ type RegistrationHandler struct {
// NewRegistrationHandler returns a new registration handler.
//
// Must only be called once per process because it manages global state.
// If a kubeClient is provided, then it synchronizes NodeResourceSlices
// If a kubeClient is provided, then it synchronizes ResourceSlices
// with the resource information provided by plugins.
func NewRegistrationHandler(kubeClient kubernetes.Interface, nodeName string) *RegistrationHandler {
handler := &RegistrationHandler{}

View File

@@ -674,12 +674,12 @@ func AddHandlers(h printers.PrintHandler) {
nodeResourceCapacityColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "Node", Type: "string", Description: resourcev1alpha2.NodeResourceSlice{}.SwaggerDoc()["nodeName"]},
{Name: "Driver", Type: "string", Description: resourcev1alpha2.NodeResourceSlice{}.SwaggerDoc()["driverName"]},
{Name: "Node", Type: "string", Description: resourcev1alpha2.ResourceSlice{}.SwaggerDoc()["nodeName"]},
{Name: "Driver", Type: "string", Description: resourcev1alpha2.ResourceSlice{}.SwaggerDoc()["driverName"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(nodeResourceCapacityColumnDefinitions, printNodeResourceSlice)
_ = h.TableHandler(nodeResourceCapacityColumnDefinitions, printNodeResourceSliceList)
_ = h.TableHandler(nodeResourceCapacityColumnDefinitions, printResourceSlice)
_ = h.TableHandler(nodeResourceCapacityColumnDefinitions, printResourceSliceList)
serviceCIDRColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
@@ -3121,7 +3121,7 @@ func printResourceClassParametersList(list *resource.ResourceClassParametersList
return rows, nil
}
func printNodeResourceSlice(obj *resource.NodeResourceSlice, options printers.GenerateOptions) ([]metav1.TableRow, error) {
func printResourceSlice(obj *resource.ResourceSlice, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
@@ -3130,10 +3130,10 @@ func printNodeResourceSlice(obj *resource.NodeResourceSlice, options printers.Ge
return []metav1.TableRow{row}, nil
}
func printNodeResourceSliceList(list *resource.NodeResourceSliceList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
func printResourceSliceList(list *resource.ResourceSliceList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printNodeResourceSlice(&list.Items[i], options)
r, err := printResourceSlice(&list.Items[i], options)
if err != nil {
return nil, err
}

View File

@@ -24,35 +24,35 @@ import (
"k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
"k8s.io/kubernetes/pkg/registry/resource/noderesourceslice"
"k8s.io/kubernetes/pkg/registry/resource/resourceslice"
)
// REST implements a RESTStorage for NodeResourceSlice.
// REST implements a RESTStorage for ResourceSlice.
type REST struct {
*genericregistry.Store
}
// NewREST returns a RESTStorage object that will work against NodeResourceSlice.
// NewREST returns a RESTStorage object that will work against ResourceSlice.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &resource.NodeResourceSlice{} },
NewListFunc: func() runtime.Object { return &resource.NodeResourceSliceList{} },
PredicateFunc: noderesourceslice.Match,
DefaultQualifiedResource: resource.Resource("noderesourceslices"),
SingularQualifiedResource: resource.Resource("noderesourceslice"),
NewFunc: func() runtime.Object { return &resource.ResourceSlice{} },
NewListFunc: func() runtime.Object { return &resource.ResourceSliceList{} },
PredicateFunc: resourceslice.Match,
DefaultQualifiedResource: resource.Resource("resourceslices"),
SingularQualifiedResource: resource.Resource("resourceslice"),
CreateStrategy: noderesourceslice.Strategy,
UpdateStrategy: noderesourceslice.Strategy,
DeleteStrategy: noderesourceslice.Strategy,
CreateStrategy: resourceslice.Strategy,
UpdateStrategy: resourceslice.Strategy,
DeleteStrategy: resourceslice.Strategy,
ReturnDeletedObject: true,
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
}
options := &generic.StoreOptions{
RESTOptions: optsGetter,
AttrFunc: noderesourceslice.GetAttrs,
TriggerFunc: noderesourceslice.TriggerFunc,
Indexers: noderesourceslice.Indexers(),
AttrFunc: resourceslice.GetAttrs,
TriggerFunc: resourceslice.TriggerFunc,
Indexers: resourceslice.Indexers(),
}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err

View File

@@ -37,7 +37,7 @@ func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1,
ResourcePrefix: "noderesourceslices",
ResourcePrefix: "resourceslices",
}
resourceClassStorage, err := NewREST(restOptions)
if err != nil {
@@ -46,8 +46,8 @@ func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
return resourceClassStorage, server
}
func validNewNodeResourceSlice(name string) *resource.NodeResourceSlice {
return &resource.NodeResourceSlice{
func validNewResourceSlice(name string) *resource.ResourceSlice {
return &resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@@ -64,13 +64,13 @@ func TestCreate(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
resourceClass := validNewNodeResourceSlice("foo")
resourceClass := validNewResourceSlice("foo")
resourceClass.ObjectMeta = metav1.ObjectMeta{GenerateName: "foo"}
test.TestCreate(
// valid
resourceClass,
// invalid
&resource.NodeResourceSlice{
&resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
},
)
@@ -83,16 +83,16 @@ func TestUpdate(t *testing.T) {
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestUpdate(
// valid
validNewNodeResourceSlice("foo"),
validNewResourceSlice("foo"),
// updateFunc
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.NodeResourceSlice)
object := obj.(*resource.ResourceSlice)
object.Labels = map[string]string{"foo": "bar"}
return object
},
// invalid update
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.NodeResourceSlice)
object := obj.(*resource.ResourceSlice)
object.DriverName = ""
return object
},
@@ -105,7 +105,7 @@ func TestDelete(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope().ReturnDeletedObject()
test.TestDelete(validNewNodeResourceSlice("foo"))
test.TestDelete(validNewResourceSlice("foo"))
}
func TestGet(t *testing.T) {
@@ -113,7 +113,7 @@ func TestGet(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestGet(validNewNodeResourceSlice("foo"))
test.TestGet(validNewResourceSlice("foo"))
}
func TestList(t *testing.T) {
@@ -121,7 +121,7 @@ func TestList(t *testing.T) {
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestList(validNewNodeResourceSlice("foo"))
test.TestList(validNewResourceSlice("foo"))
}
func TestWatch(t *testing.T) {
@@ -130,7 +130,7 @@ func TestWatch(t *testing.T) {
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestWatch(
validNewNodeResourceSlice("foo"),
validNewResourceSlice("foo"),
// matching labels
[]labels.Set{},
// not matching labels

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesourceslice
package resourceslice
import (
"context"
@@ -33,49 +33,49 @@ import (
"k8s.io/kubernetes/pkg/apis/resource/validation"
)
// nodeResourceSliceStrategy implements behavior for NodeResourceSlice objects
type nodeResourceSliceStrategy struct {
// resourceSliceStrategy implements behavior for ResourceSlice objects
type resourceSliceStrategy struct {
runtime.ObjectTyper
names.NameGenerator
}
var Strategy = nodeResourceSliceStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
var Strategy = resourceSliceStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
func (nodeResourceSliceStrategy) NamespaceScoped() bool {
func (resourceSliceStrategy) NamespaceScoped() bool {
return false
}
func (nodeResourceSliceStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
func (resourceSliceStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
}
func (nodeResourceSliceStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
slice := obj.(*resource.NodeResourceSlice)
return validation.ValidateNodeResourceSlice(slice)
func (resourceSliceStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
slice := obj.(*resource.ResourceSlice)
return validation.ValidateResourceSlice(slice)
}
func (nodeResourceSliceStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
func (resourceSliceStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
return nil
}
func (nodeResourceSliceStrategy) Canonicalize(obj runtime.Object) {
func (resourceSliceStrategy) Canonicalize(obj runtime.Object) {
}
func (nodeResourceSliceStrategy) AllowCreateOnUpdate() bool {
func (resourceSliceStrategy) AllowCreateOnUpdate() bool {
return false
}
func (nodeResourceSliceStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
func (resourceSliceStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
}
func (nodeResourceSliceStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateNodeResourceSliceUpdate(obj.(*resource.NodeResourceSlice), old.(*resource.NodeResourceSlice))
func (resourceSliceStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateResourceSliceUpdate(obj.(*resource.ResourceSlice), old.(*resource.ResourceSlice))
}
func (nodeResourceSliceStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
func (resourceSliceStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
return nil
}
func (nodeResourceSliceStrategy) AllowUnconditionalUpdate() bool {
func (resourceSliceStrategy) AllowUnconditionalUpdate() bool {
return true
}
@@ -86,10 +86,10 @@ var TriggerFunc = map[string]storage.IndexerFunc{
}
func nodeNameTriggerFunc(obj runtime.Object) string {
return obj.(*resource.NodeResourceSlice).NodeName
return obj.(*resource.ResourceSlice).NodeName
}
// Indexers returns the indexers for NodeResourceSlice.
// Indexers returns the indexers for ResourceSlice.
func Indexers() *cache.Indexers {
return &cache.Indexers{
storage.FieldIndex("nodeName"): nodeNameIndexFunc,
@@ -97,18 +97,18 @@ func Indexers() *cache.Indexers {
}
func nodeNameIndexFunc(obj interface{}) ([]string, error) {
slice, ok := obj.(*resource.NodeResourceSlice)
slice, ok := obj.(*resource.ResourceSlice)
if !ok {
return nil, fmt.Errorf("not a NodeResourceSlice")
return nil, fmt.Errorf("not a ResourceSlice")
}
return []string{slice.NodeName}, nil
}
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
slice, ok := obj.(*resource.NodeResourceSlice)
slice, ok := obj.(*resource.ResourceSlice)
if !ok {
return nil, nil, fmt.Errorf("not a NodeResourceSlice")
return nil, nil, fmt.Errorf("not a ResourceSlice")
}
return labels.Set(slice.ObjectMeta.Labels), toSelectableFields(slice), nil
}
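GetAttrs and the nodeName indexer above are what back the field selector mentioned in the API docs. As an illustrative sketch only (not part of this commit; clientset construction is assumed), listing the slices for a single node could look like this:

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/fields"
		"k8s.io/client-go/kubernetes"
	)

	// listSlicesForNode lists only the ResourceSlice objects whose NodeName
	// matches the given node, relying on the "nodeName" field index
	// registered above.
	func listSlicesForNode(ctx context.Context, client kubernetes.Interface, nodeName string) error {
		slices, err := client.ResourceV1alpha2().ResourceSlices().List(ctx, metav1.ListOptions{
			FieldSelector: fields.OneTermEqualSelector("nodeName", nodeName).String(),
		})
		if err != nil {
			return err
		}
		for i := range slices.Items {
			fmt.Println(slices.Items[i].Name, slices.Items[i].DriverName)
		}
		return nil
	}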
@@ -125,7 +125,7 @@ func Match(label labels.Selector, field fields.Selector) storage.SelectionPredic
// toSelectableFields returns a field set that represents the object
// TODO: fields are not labels, and the validation rules for them do not apply.
func toSelectableFields(slice *resource.NodeResourceSlice) fields.Set {
func toSelectableFields(slice *resource.ResourceSlice) fields.Set {
// The purpose of allocation with a given number of elements is to reduce
// amount of allocations needed to create the fields.Set. If you add any
// field here or the number of object-meta related fields changes, this should

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesourceslice
package resourceslice
import (
"testing"
@@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/apis/resource"
)
var slice = &resource.NodeResourceSlice{
var slice = &resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "valid-class",
},
@@ -37,10 +37,10 @@ var slice = &resource.NodeResourceSlice{
func TestClassStrategy(t *testing.T) {
if Strategy.NamespaceScoped() {
t.Errorf("NodeResourceSlice must not be namespace scoped")
t.Errorf("ResourceSlice must not be namespace scoped")
}
if Strategy.AllowCreateOnUpdate() {
t.Errorf("NodeResourceSlice should not allow create on update")
t.Errorf("ResourceSlice should not allow create on update")
}
}

View File

@@ -24,13 +24,13 @@ import (
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
noderesourceslicestore "k8s.io/kubernetes/pkg/registry/resource/noderesourceslice/storage"
podschedulingcontextsstore "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext/storage"
resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage"
resourceclaimparametersstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimparameters/storage"
resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage"
resourceclassstore "k8s.io/kubernetes/pkg/registry/resource/resourceclass/storage"
resourceclassparametersstore "k8s.io/kubernetes/pkg/registry/resource/resourceclassparameters/storage"
resourceslicestore "k8s.io/kubernetes/pkg/registry/resource/resourceslice/storage"
)
type RESTStorageProvider struct{}
@@ -102,12 +102,12 @@ func (p RESTStorageProvider) v1alpha2Storage(apiResourceConfigSource serverstora
storage[resource] = resourceClassParametersStorage
}
if resource := "noderesourceslices"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
nodeResourceSliceStorage, err := noderesourceslicestore.NewREST(restOptionsGetter)
if resource := "resourceslices"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
resourceSliceStorage, err := resourceslicestore.NewREST(restOptionsGetter)
if err != nil {
return nil, err
}
storage[resource] = nodeResourceSliceStorage
storage[resource] = resourceSliceStorage
}
return storage, nil

View File

@@ -277,7 +277,7 @@ type dynamicResources struct {
podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister
claimParametersLister resourcev1alpha2listers.ResourceClaimParametersLister
classParametersLister resourcev1alpha2listers.ResourceClassParametersLister
nodeResourceSliceLister resourcev1alpha2listers.NodeResourceSliceLister
resourceSliceLister resourcev1alpha2listers.ResourceSliceLister
claimNameLookup *resourceclaim.Lookup
// claimAssumeCache enables temporarily storing a newer claim object
@@ -295,7 +295,7 @@ type dynamicResources struct {
// assigned to such a claim. Alternatively, claim allocation state
// could also get tracked across pod scheduling cycles, but that
// - adds complexity (need to carefully sync state with informer events
// for claims and NodeResourceSlices)
// for claims and ResourceSlices)
// - would make integration with cluster autoscaler harder because it would need
// to trigger informer callbacks.
//
@@ -353,7 +353,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
podSchedulingContextLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulingContexts().Lister(),
claimParametersLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaimParameters().Lister(),
classParametersLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClassParameters().Lister(),
nodeResourceSliceLister: fh.SharedInformerFactory().Resource().V1alpha2().NodeResourceSlices().Lister(),
resourceSliceLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceSlices().Lister(),
claimNameLookup: resourceclaim.NewNameLookup(fh.ClientSet()),
claimAssumeCache: volumebinding.NewAssumeCache(logger, fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Informer(), "claim", "", nil),
}
@@ -943,7 +943,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
// problems for using the plugin in the Cluster Autoscaler. If
// this step here turns out to be expensive, we may have to
// maintain and update state more persistently.
resources, err := newResourceModel(logger, pl.nodeResourceSliceLister, pl.claimAssumeCache)
resources, err := newResourceModel(logger, pl.resourceSliceLister, pl.claimAssumeCache)
if err != nil {
return nil, statusError(logger, err)
}

View File

@@ -44,10 +44,10 @@ type resourceModels struct {
// with an unknown structured parameter model silently ignored. An error gets
// logged later when parameters required for a pod depend on such an unknown
// model.
func newResourceModel(logger klog.Logger, nodeResourceSliceLister resourcev1alpha2listers.NodeResourceSliceLister, claimAssumeCache volumebinding.AssumeCache) (resources, error) {
func newResourceModel(logger klog.Logger, resourceSliceLister resourcev1alpha2listers.ResourceSliceLister, claimAssumeCache volumebinding.AssumeCache) (resources, error) {
model := make(resources)
slices, err := nodeResourceSliceLister.List(labels.Everything())
slices, err := resourceSliceLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("list node resource slices: %w", err)
}

View File

@@ -110,13 +110,13 @@ func (p *Plugin) ValidateInitialization() error {
}
var (
podResource = api.Resource("pods")
nodeResource = api.Resource("nodes")
pvcResource = api.Resource("persistentvolumeclaims")
svcacctResource = api.Resource("serviceaccounts")
leaseResource = coordapi.Resource("leases")
csiNodeResource = storage.Resource("csinodes")
nodeResourceSliceResource = resource.Resource("noderesourceslices")
podResource = api.Resource("pods")
nodeResource = api.Resource("nodes")
pvcResource = api.Resource("persistentvolumeclaims")
svcacctResource = api.Resource("serviceaccounts")
leaseResource = coordapi.Resource("leases")
csiNodeResource = storage.Resource("csinodes")
resourceSliceResource = resource.Resource("resourceslices")
)
// Admit checks the admission policy and triggers corresponding actions
@@ -168,8 +168,8 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
case csiNodeResource:
return p.admitCSINode(nodeName, a)
case nodeResourceSliceResource:
return p.admitNodeResourceSlice(nodeName, a)
case resourceSliceResource:
return p.admitResourceSlice(nodeName, a)
default:
return nil
@@ -639,17 +639,17 @@ func (p *Plugin) admitCSINode(nodeName string, a admission.Attributes) error {
return nil
}
func (p *Plugin) admitNodeResourceSlice(nodeName string, a admission.Attributes) error {
func (p *Plugin) admitResourceSlice(nodeName string, a admission.Attributes) error {
// The create request must come from a node with the same name as the NodeName field.
// Other requests get checked by the node authorizer.
if a.GetOperation() == admission.Create {
slice, ok := a.GetObject().(*resource.NodeResourceSlice)
slice, ok := a.GetObject().(*resource.ResourceSlice)
if !ok {
return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject()))
}
if slice.NodeName != nodeName {
return admission.NewForbidden(a, errors.New("can only create NodeResourceSlice with the same NodeName as the requesting node"))
return admission.NewForbidden(a, errors.New("can only create ResourceSlice with the same NodeName as the requesting node"))
}
}

View File

@@ -1603,19 +1603,19 @@ func createPodAttributes(pod *api.Pod, user user.Info) admission.Attributes {
return admission.NewAttributesRecord(pod, nil, podKind, pod.Namespace, pod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, user)
}
func TestAdmitNodeResourceSlice(t *testing.T) {
apiResource := resourceapi.SchemeGroupVersion.WithResource("noderesourceslices")
func TestAdmitResourceSlice(t *testing.T) {
apiResource := resourceapi.SchemeGroupVersion.WithResource("resourceslices")
nodename := "mynode"
mynode := &user.DefaultInfo{Name: "system:node:" + nodename, Groups: []string{"system:nodes"}}
err := "can only create NodeResourceSlice with the same NodeName as the requesting node"
err := "can only create ResourceSlice with the same NodeName as the requesting node"
sliceNode := &resourceapi.NodeResourceSlice{
sliceNode := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "something",
},
NodeName: nodename,
}
sliceOtherNode := &resourceapi.NodeResourceSlice{
sliceOtherNode := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "something",
},

View File

@@ -127,7 +127,7 @@ const (
var vertexTypes = map[vertexType]string{
configMapVertexType: "configmap",
sliceVertexType: "noderesourceslice",
sliceVertexType: "resourceslice",
nodeVertexType: "node",
podVertexType: "pod",
pvcVertexType: "pvc",
@@ -495,13 +495,13 @@ func (g *Graph) DeleteVolumeAttachment(name string) {
g.deleteVertex_locked(vaVertexType, "", name)
}
// AddNodeResourceSlice sets up edges for the following relationships:
// AddResourceSlice sets up edges for the following relationships:
//
// node resource slice -> node
func (g *Graph) AddNodeResourceSlice(sliceName, nodeName string) {
func (g *Graph) AddResourceSlice(sliceName, nodeName string) {
start := time.Now()
defer func() {
graphActionsDuration.WithLabelValues("AddNodeResourceSlice").Observe(time.Since(start).Seconds())
graphActionsDuration.WithLabelValues("AddResourceSlice").Observe(time.Since(start).Seconds())
}()
g.lock.Lock()
defer g.lock.Unlock()
@@ -516,10 +516,10 @@ func (g *Graph) AddNodeResourceSlice(sliceName, nodeName string) {
g.graph.SetEdge(newDestinationEdge(sliceVertex, nodeVertex, nodeVertex))
}
}
func (g *Graph) DeleteNodeResourceSlice(sliceName string) {
func (g *Graph) DeleteResourceSlice(sliceName string) {
start := time.Now()
defer func() {
graphActionsDuration.WithLabelValues("DeleteNodeResourceSlice").Observe(time.Since(start).Seconds())
graphActionsDuration.WithLabelValues("DeleteResourceSlice").Observe(time.Since(start).Seconds())
}()
g.lock.Lock()
defer g.lock.Unlock()

View File

@@ -41,7 +41,7 @@ func AddGraphEventHandlers(
pods corev1informers.PodInformer,
pvs corev1informers.PersistentVolumeInformer,
attachments storageinformers.VolumeAttachmentInformer,
slices resourcev1alpha2informers.NodeResourceSliceInformer,
slices resourcev1alpha2informers.ResourceSliceInformer,
) {
g := &graphPopulator{
graph: graph,
@@ -71,9 +71,9 @@ func AddGraphEventHandlers(
if slices != nil {
sliceHandler, _ := slices.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: g.addNodeResourceSlice,
AddFunc: g.addResourceSlice,
UpdateFunc: nil, // Not needed, NodeName is immutable.
DeleteFunc: g.deleteNodeResourceSlice,
DeleteFunc: g.deleteResourceSlice,
})
synced = append(synced, sliceHandler.HasSynced)
}
@@ -200,23 +200,23 @@ func (g *graphPopulator) deleteVolumeAttachment(obj interface{}) {
g.graph.DeleteVolumeAttachment(attachment.Name)
}
func (g *graphPopulator) addNodeResourceSlice(obj interface{}) {
slice, ok := obj.(*resourcev1alpha2.NodeResourceSlice)
func (g *graphPopulator) addResourceSlice(obj interface{}) {
slice, ok := obj.(*resourcev1alpha2.ResourceSlice)
if !ok {
klog.Infof("unexpected type %T", obj)
return
}
g.graph.AddNodeResourceSlice(slice.Name, slice.NodeName)
g.graph.AddResourceSlice(slice.Name, slice.NodeName)
}
func (g *graphPopulator) deleteNodeResourceSlice(obj interface{}) {
func (g *graphPopulator) deleteResourceSlice(obj interface{}) {
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = tombstone.Obj
}
slice, ok := obj.(*resourcev1alpha2.NodeResourceSlice)
slice, ok := obj.(*resourcev1alpha2.ResourceSlice)
if !ok {
klog.Infof("unexpected type %T", obj)
return
}
g.graph.DeleteNodeResourceSlice(slice.Name)
g.graph.DeleteResourceSlice(slice.Name)
}

View File

@@ -50,7 +50,7 @@ import (
// node <- pod <- pvc <- pv
// node <- pod <- pvc <- pv <- secret
// node <- pod <- ResourceClaim
// 4. If a request is for a noderesourceslice, then authorize access if there is an
// 4. If a request is for a resourceslice, then authorize access if there is an
// edge from the existing slice object to the node, which is the case if the
// existing object has the node in its NodeName field. For create, the access gets
// granted because the noderestriction admission plugin checks that the NodeName
@@ -81,7 +81,7 @@ func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules
var (
configMapResource = api.Resource("configmaps")
secretResource = api.Resource("secrets")
nodeResourceSlice = resourceapi.Resource("noderesourceslices")
resourceSlice = resourceapi.Resource("resourceslices")
pvcResource = api.Resource("persistentvolumeclaims")
pvResource = api.Resource("persistentvolumes")
resourceClaimResource = resourceapi.Resource("resourceclaims")
@@ -136,8 +136,8 @@ func (r *NodeAuthorizer) Authorize(ctx context.Context, attrs authorizer.Attribu
return r.authorizeLease(nodeName, attrs)
case csiNodeResource:
return r.authorizeCSINode(nodeName, attrs)
case nodeResourceSlice:
return r.authorizeNodeResourceSlice(nodeName, attrs)
case resourceSlice:
return r.authorizeResourceSlice(nodeName, attrs)
}
}
@@ -302,11 +302,11 @@ func (r *NodeAuthorizer) authorizeCSINode(nodeName string, attrs authorizer.Attr
return authorizer.DecisionAllow, "", nil
}
// authorizeNodeResourceSlice authorizes node requests to NodeResourceSlice resource.k8s.io/noderesourceslices
func (r *NodeAuthorizer) authorizeNodeResourceSlice(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
// authorizeResourceSlice authorizes node requests to ResourceSlice resource.k8s.io/resourceslices
func (r *NodeAuthorizer) authorizeResourceSlice(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
if len(attrs.GetSubresource()) > 0 {
klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "cannot authorize NodeResourceSlice subresources", nil
return authorizer.DecisionNoOpinion, "cannot authorize ResourceSlice subresources", nil
}
// allowed verbs: get, create, update, patch, delete
@@ -319,10 +319,10 @@ func (r *NodeAuthorizer) authorizeNodeResourceSlice(nodeName string, attrs autho
return authorizer.DecisionAllow, "", nil
default:
klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "can only get, create, update, patch, or delete a NodeResourceSlice", nil
return authorizer.DecisionNoOpinion, "can only get, create, update, patch, or delete a ResourceSlice", nil
}
// The request must come from a node with the same name as the NodeResourceSlice.NodeName field.
// The request must come from a node with the same name as the ResourceSlice.NodeName field.
//
// For create, the noderestriction admission plugin is performing this check.
// Here we don't have access to the content of the new object.

View File

@@ -338,65 +338,65 @@ func TestAuthorizer(t *testing.T) {
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "csinodes", APIGroup: "storage.k8s.io", Name: "node0"},
expect: authorizer.DecisionAllow,
},
// NodeResourceSlice
// ResourceSlice
{
name: "disallowed NodeResourceSlice with subresource",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "noderesourceslices", Subresource: "status", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
name: "disallowed ResourceSlice with subresource",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "resourceslices", Subresource: "status", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed get another node's NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
name: "disallowed get another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed update another node's NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "update", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
name: "disallowed update another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "update", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed patch another node's NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "patch", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
name: "disallowed patch another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "patch", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed delete another node's NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
name: "disallowed delete another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "allowed list NodeResourceSlices",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "noderesourceslices", APIGroup: "resource.k8s.io"},
name: "allowed list ResourceSlices",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "resourceslices", APIGroup: "resource.k8s.io"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed watch NodeResourceSlices",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", Resource: "noderesourceslices", APIGroup: "resource.k8s.io"},
name: "allowed watch ResourceSlices",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", Resource: "resourceslices", APIGroup: "resource.k8s.io"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed get NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
name: "allowed get ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed create NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "create", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
name: "allowed create ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "create", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed update NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "update", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
name: "allowed update ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "update", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed patch NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "patch", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
name: "allowed patch ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "patch", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed delete NodeResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "noderesourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
name: "allowed delete ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
}
@@ -831,7 +831,7 @@ func BenchmarkAuthorization(b *testing.B) {
}
}
func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*corev1.PersistentVolume, attachments []*storagev1.VolumeAttachment, slices []*resourcev1alpha2.NodeResourceSlice) {
func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*corev1.PersistentVolume, attachments []*storagev1.VolumeAttachment, slices []*resourcev1alpha2.ResourceSlice) {
p := &graphPopulator{}
p.graph = graph
for _, pod := range pods {
@@ -844,7 +844,7 @@ func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*cor
p.addVolumeAttachment(attachment)
}
for _, slice := range slices {
p.addNodeResourceSlice(slice)
p.addResourceSlice(slice)
}
}
@@ -859,12 +859,12 @@ func randomSubset(a, b int) []int {
// the secret/configmap/pvc/node references in the pod and pv objects are named to indicate the connections between the objects.
// for example, secret0-pod0-node0 is a secret referenced by pod0 which is bound to node0.
// when populated into the graph, the node authorizer should allow node0 to access that secret, but not node1.
func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.PersistentVolume, []*storagev1.VolumeAttachment, []*resourcev1alpha2.NodeResourceSlice) {
func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.PersistentVolume, []*storagev1.VolumeAttachment, []*resourcev1alpha2.ResourceSlice) {
nodes := make([]*corev1.Node, 0, opts.nodes)
pods := make([]*corev1.Pod, 0, opts.nodes*opts.podsPerNode)
pvs := make([]*corev1.PersistentVolume, 0, (opts.nodes*opts.podsPerNode*opts.uniquePVCsPerPod)+(opts.sharedPVCsPerPod*opts.namespaces))
attachments := make([]*storagev1.VolumeAttachment, 0, opts.nodes*opts.attachmentsPerNode)
slices := make([]*resourcev1alpha2.NodeResourceSlice, 0, opts.nodes*opts.nodeResourceCapacitiesPerNode)
slices := make([]*resourcev1alpha2.ResourceSlice, 0, opts.nodes*opts.nodeResourceCapacitiesPerNode)
rand.Seed(12345)
@@ -893,7 +893,7 @@ func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.Pe
for p := 0; p <= opts.nodeResourceCapacitiesPerNode; p++ {
name := fmt.Sprintf("slice%d-%s", p, nodeName)
slice := &resourcev1alpha2.NodeResourceSlice{
slice := &resourcev1alpha2.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{Name: name},
NodeName: nodeName,
}

View File

@@ -582,7 +582,7 @@ func ClusterRoles() []rbacv1.ClusterRole {
rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulingcontexts/status").RuleOrDie(),
rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("noderesourceslices", "resourceclassparameters", "resourceclaimparameters").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceslices", "resourceclassparameters", "resourceclaimparameters").RuleOrDie(),
)
}
roles = append(roles, rbacv1.ClusterRole{

File diff suppressed because it is too large

View File

@@ -216,36 +216,6 @@ message NodeResourceModel {
optional NamedResourcesResources namedResources = 1;
}
// NodeResourceSlice provides information about available
// resources on individual nodes.
message NodeResourceSlice {
// Standard object metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// NodeName identifies the node where the capacity is available.
// A field selector can be used to list only NodeResourceSlice
// objects with a certain node name.
optional string nodeName = 2;
// DriverName identifies the DRA driver providing the capacity information.
// A field selector can be used to list only NodeResourceSlice
// objects with a certain driver name.
optional string driverName = 3;
optional NodeResourceModel nodeResourceModel = 4;
}
// NodeResourceSliceList is a collection of NodeResourceSlices.
message NodeResourceSliceList {
// Standard list metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of node resource capacity objects.
repeated NodeResourceSlice items = 2;
}
// PodSchedulingContext objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode.
@@ -704,6 +674,40 @@ message ResourceRequestModel {
optional NamedResourcesRequest namedResources = 1;
}
// ResourceSlice provides information about available
// resources on individual nodes.
message ResourceSlice {
// Standard object metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// NodeName identifies the node which provides the resources
// if they are local to a node.
//
// A field selector can be used to list only ResourceSlice
// objects with a certain node name.
//
// +optional
optional string nodeName = 2;
// DriverName identifies the DRA driver providing the capacity information.
// A field selector can be used to list only ResourceSlice
// objects with a certain driver name.
optional string driverName = 3;
optional NodeResourceModel nodeResourceModel = 4;
}
// ResourceSliceList is a collection of ResourceSlices.
message ResourceSliceList {
// Standard list metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of node resource capacity objects.
repeated ResourceSlice items = 2;
}
// StructuredResourceHandle is the in-tree representation of the allocation result.
message StructuredResourceHandle {
// VendorClassParameters are the per-claim configuration parameters
@@ -719,7 +723,10 @@ message StructuredResourceHandle {
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClaimParameters = 2;
// NodeName is the name of the node providing the necessary resources.
// NodeName is the name of the node providing the necessary resources
// if the resources are local to a node.
//
// +optional
optional string nodeName = 4;
// Results lists all allocated driver resources.

View File

@@ -52,8 +52,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimTemplateList{},
&PodSchedulingContext{},
&PodSchedulingContextList{},
&NodeResourceSlice{},
&NodeResourceSliceList{},
&ResourceSlice{},
&ResourceSliceList{},
&ResourceClaimParameters{},
&ResourceClaimParametersList{},
&ResourceClassParameters{},

View File

@@ -224,8 +224,11 @@ type StructuredResourceHandle struct {
// +optional
VendorClaimParameters runtime.RawExtension `json:"vendorClaimParameters,omitempty" protobuf:"bytes,2,opt,name=vendorClaimParameters"`
// NodeName is the name of the node providing the necessary resources.
NodeName string `json:"nodeName" protobuf:"bytes,4,name=nodeName"`
// NodeName is the name of the node providing the necessary resources
// if the resources are local to a node.
//
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,4,name=nodeName"`
// Results lists all allocated driver resources.
//
@@ -529,21 +532,25 @@ type ResourceClaimTemplateList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// NodeResourceSlice provides information about available
// ResourceSlice provides information about available
// resources on individual nodes.
type NodeResourceSlice struct {
type ResourceSlice struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// NodeName identifies the node where the capacity is available.
// A field selector can be used to list only NodeResourceSlice
// NodeName identifies the node which provides the resources
// if they are local to a node.
//
// A field selector can be used to list only ResourceSlice
// objects with a certain node name.
NodeName string `json:"nodeName" protobuf:"bytes,2,name=nodeName"`
//
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,2,opt,name=nodeName"`
// DriverName identifies the DRA driver providing the capacity information.
// A field selector can be used to list only NodeResourceSlice
// A field selector can be used to list only ResourceSlice
// objects with a certain driver name.
DriverName string `json:"driverName" protobuf:"bytes,3,name=driverName"`
@@ -561,15 +568,15 @@ type NodeResourceModel struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// NodeResourceSliceList is a collection of NodeResourceSlices.
type NodeResourceSliceList struct {
// ResourceSliceList is a collection of ResourceSlices.
type ResourceSliceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of node resource capacity objects.
Items []NodeResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
}
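Since NodeName is now optional, a slice no longer has to be node-local. A minimal illustration (not part of this commit; the driver name is hypothetical) of constructing both variants with the renamed type:

	import (
		resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	func exampleSlices() []*resourcev1alpha2.ResourceSlice {
		// A node-local slice as the kubelet would publish it: NodeName must be
		// set and must match the publishing node.
		local := &resourcev1alpha2.ResourceSlice{
			ObjectMeta: metav1.ObjectMeta{GenerateName: "node1-driver.example.com-"},
			NodeName:   "node1",
			DriverName: "driver.example.com",
		}
		// A hypothetical network-attached slice: with the field now optional,
		// NodeName can simply be left empty.
		networkAttached := &resourcev1alpha2.ResourceSlice{
			ObjectMeta: metav1.ObjectMeta{GenerateName: "driver.example.com-"},
			DriverName: "driver.example.com",
		}
		return []*resourcev1alpha2.ResourceSlice{local, networkAttached}
	}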
// +genclient

View File

@@ -76,27 +76,6 @@ func (NodeResourceModel) SwaggerDoc() map[string]string {
return map_NodeResourceModel
}
var map_NodeResourceSlice = map[string]string{
"": "NodeResourceSlice provides information about available resources on individual nodes.",
"metadata": "Standard object metadata",
"nodeName": "NodeName identifies the node where the capacity is available. A field selector can be used to list only NodeResourceSlice objects with a certain node name.",
"driverName": "DriverName identifies the DRA driver providing the capacity information. A field selector can be used to list only NodeResourceSlice objects with a certain driver name.",
}
func (NodeResourceSlice) SwaggerDoc() map[string]string {
return map_NodeResourceSlice
}
var map_NodeResourceSliceList = map[string]string{
"": "NodeResourceSliceList is a collection of NodeResourceSlices.",
"metadata": "Standard list metadata",
"items": "Items is the list of node resource capacity objects.",
}
func (NodeResourceSliceList) SwaggerDoc() map[string]string {
return map_NodeResourceSliceList
}
var map_PodSchedulingContext = map[string]string{
"": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"metadata": "Standard object metadata",
@@ -370,11 +349,32 @@ func (ResourceRequestModel) SwaggerDoc() map[string]string {
return map_ResourceRequestModel
}
var map_ResourceSlice = map[string]string{
"": "ResourceSlice provides information about available resources on individual nodes.",
"metadata": "Standard object metadata",
"nodeName": "NodeName identifies the node which provides the resources if they are local to a node.\n\nA field selector can be used to list only ResourceSlice objects with a certain node name.",
"driverName": "DriverName identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.",
}
func (ResourceSlice) SwaggerDoc() map[string]string {
return map_ResourceSlice
}
var map_ResourceSliceList = map[string]string{
"": "ResourceSliceList is a collection of ResourceSlices.",
"metadata": "Standard list metadata",
"items": "Items is the list of node resource capacity objects.",
}
func (ResourceSliceList) SwaggerDoc() map[string]string {
return map_ResourceSliceList
}
var map_StructuredResourceHandle = map[string]string{
"": "StructuredResourceHandle is the in-tree representation of the allocation result.",
"vendorClassParameters": "VendorClassParameters are the per-claim configuration parameters from the resource class at the time that the claim was allocated.",
"vendorClaimParameters": "VendorClaimParameters are the per-claim configuration parameters from the resource claim parameters at the time that the claim was allocated.",
"nodeName": "NodeName is the name of the node providing the necessary resources.",
"nodeName": "NodeName is the name of the node providing the necessary resources if the resources are local to a node.",
"results": "Results lists all allocated driver resources.",
}

View File

@@ -342,66 +342,6 @@ func (in *NodeResourceModel) DeepCopy() *NodeResourceModel {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceSlice) DeepCopyInto(out *NodeResourceSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.NodeResourceModel.DeepCopyInto(&out.NodeResourceModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceSlice.
func (in *NodeResourceSlice) DeepCopy() *NodeResourceSlice {
if in == nil {
return nil
}
out := new(NodeResourceSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourceSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceSliceList) DeepCopyInto(out *NodeResourceSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NodeResourceSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceSliceList.
func (in *NodeResourceSliceList) DeepCopy() *NodeResourceSliceList {
if in == nil {
return nil
}
out := new(NodeResourceSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourceSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
*out = *in
@@ -1083,6 +1023,66 @@ func (in *ResourceRequestModel) DeepCopy() *ResourceRequestModel {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.NodeResourceModel.DeepCopyInto(&out.NodeResourceModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
func (in *ResourceSlice) DeepCopy() *ResourceSlice {
if in == nil {
return nil
}
out := new(ResourceSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
if in == nil {
return nil
}
out := new(ResourceSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) {
*out = *in

View File

@@ -1,5 +1,5 @@
{
"kind": "NodeResourceSlice",
"kind": "ResourceSlice",
"apiVersion": "resource.k8s.io/v1alpha2",
"metadata": {
"name": "nameValue",

View File

@@ -1,6 +1,6 @@
apiVersion: resource.k8s.io/v1alpha2
driverName: driverNameValue
kind: NodeResourceSlice
kind: ResourceSlice
metadata:
annotations:
annotationsKey: annotationsValue

View File

@@ -12058,30 +12058,6 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
scalar: string
elementRelationship: atomic
- name: io.k8s.api.resource.v1alpha2.NodeResourceSlice
map:
fields:
- name: apiVersion
type:
scalar: string
- name: driverName
type:
scalar: string
default: ""
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: namedResources
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesResources
- name: nodeName
type:
scalar: string
default: ""
- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext
map:
fields:
@@ -12378,13 +12354,35 @@ var schemaYAML = typed.YAMLObject(`types:
- name: vendorParameters
type:
namedType: __untyped_atomic_
- name: io.k8s.api.resource.v1alpha2.ResourceSlice
map:
fields:
- name: apiVersion
type:
scalar: string
- name: driverName
type:
scalar: string
default: ""
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: namedResources
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesResources
- name: nodeName
type:
scalar: string
- name: io.k8s.api.resource.v1alpha2.StructuredResourceHandle
map:
fields:
- name: nodeName
type:
scalar: string
default: ""
- name: results
type:
list:

View File

@@ -27,9 +27,9 @@ import (
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// NodeResourceSliceApplyConfiguration represents a declarative configuration of the NodeResourceSlice type for use
// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use
// with apply.
type NodeResourceSliceApplyConfiguration struct {
type ResourceSliceApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
NodeName *string `json:"nodeName,omitempty"`
@@ -37,47 +37,47 @@ type NodeResourceSliceApplyConfiguration struct {
NodeResourceModelApplyConfiguration `json:",inline"`
}
// NodeResourceSlice constructs a declarative configuration of the NodeResourceSlice type for use with
// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with
// apply.
func NodeResourceSlice(name string) *NodeResourceSliceApplyConfiguration {
b := &NodeResourceSliceApplyConfiguration{}
func ResourceSlice(name string) *ResourceSliceApplyConfiguration {
b := &ResourceSliceApplyConfiguration{}
b.WithName(name)
b.WithKind("NodeResourceSlice")
b.WithKind("ResourceSlice")
b.WithAPIVersion("resource.k8s.io/v1alpha2")
return b
}
// ExtractNodeResourceSlice extracts the applied configuration owned by fieldManager from
// nodeResourceSlice. If no managedFields are found in nodeResourceSlice for fieldManager, a
// NodeResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
// APIVersion and Kind populated. It is possible that no managed fields were found because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned any fields.
// nodeResourceSlice must be an unmodified NodeResourceSlice API object that was retrieved from the Kubernetes API.
// ExtractNodeResourceSlice provides a way to perform an extract/modify-in-place/apply workflow.
// resourceSlice must be an unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
// ExtractResourceSlice provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractNodeResourceSlice(nodeResourceSlice *resourcev1alpha2.NodeResourceSlice, fieldManager string) (*NodeResourceSliceApplyConfiguration, error) {
return extractNodeResourceSlice(nodeResourceSlice, fieldManager, "")
func ExtractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
return extractResourceSlice(resourceSlice, fieldManager, "")
}
// ExtractNodeResourceSliceStatus is the same as ExtractNodeResourceSlice except
// ExtractResourceSliceStatus is the same as ExtractResourceSlice except
// that it extracts the status subresource applied configuration.
// Experimental!
func ExtractNodeResourceSliceStatus(nodeResourceSlice *resourcev1alpha2.NodeResourceSlice, fieldManager string) (*NodeResourceSliceApplyConfiguration, error) {
return extractNodeResourceSlice(nodeResourceSlice, fieldManager, "status")
func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
return extractResourceSlice(resourceSlice, fieldManager, "status")
}
func extractNodeResourceSlice(nodeResourceSlice *resourcev1alpha2.NodeResourceSlice, fieldManager string, subresource string) (*NodeResourceSliceApplyConfiguration, error) {
b := &NodeResourceSliceApplyConfiguration{}
err := managedfields.ExtractInto(nodeResourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha2.NodeResourceSlice"), fieldManager, b, subresource)
func extractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
b := &ResourceSliceApplyConfiguration{}
err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceSlice"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
b.WithName(nodeResourceSlice.Name)
b.WithName(resourceSlice.Name)
b.WithKind("NodeResourceSlice")
b.WithKind("ResourceSlice")
b.WithAPIVersion("resource.k8s.io/v1alpha2")
return b, nil
}
@@ -85,7 +85,7 @@ func extractNodeResourceSlice(nodeResourceSlice *resourcev1alpha2.NodeResourceSl
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithKind(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration {
b.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithKind(value string) *NodeResour
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithAPIVersion(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration {
b.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithAPIVersion(value string) *Node
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithName(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Name = &value
return b
@@ -110,7 +110,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithName(value string) *NodeResour
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithGenerateName(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.GenerateName = &value
return b
@@ -119,7 +119,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithGenerateName(value string) *No
// WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithNamespace(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Namespace = &value
return b
@@ -128,7 +128,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithNamespace(value string) *NodeR
// WithUID sets the UID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UID field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithUID(value types.UID) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.UID = &value
return b
@@ -137,7 +137,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithUID(value types.UID) *NodeReso
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithResourceVersion(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.ResourceVersion = &value
return b
@@ -146,7 +146,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithResourceVersion(value string)
// WithGeneration sets the Generation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Generation field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithGeneration(value int64) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.Generation = &value
return b
@@ -155,7 +155,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithGeneration(value int64) *NodeR
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.CreationTimestamp = &value
return b
@@ -164,7 +164,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionTimestamp = &value
return b
@@ -173,7 +173,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
b.DeletionGracePeriodSeconds = &value
return b
@@ -183,7 +183,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(val
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Labels field,
// overwriting existing entries in the Labels field with the same key.
func (b *NodeResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Labels == nil && len(entries) > 0 {
b.Labels = make(map[string]string, len(entries))
@@ -198,7 +198,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithLabels(entries map[string]stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Annotations field,
// overwriting existing entries in the Annotations field with the same key.
func (b *NodeResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
if b.Annotations == nil && len(entries) > 0 {
b.Annotations = make(map[string]string, len(entries))
@@ -212,7 +212,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithAnnotations(entries map[string
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
func (b *NodeResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
@@ -226,7 +226,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field.
func (b *NodeResourceSliceApplyConfiguration) WithFinalizers(values ...string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
b.Finalizers = append(b.Finalizers, values[i])
@@ -234,7 +234,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithFinalizers(values ...string) *
return b
}
func (b *NodeResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
}
@@ -243,7 +243,7 @@ func (b *NodeResourceSliceApplyConfiguration) ensureObjectMetaApplyConfiguration
// WithNodeName sets the NodeName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeName field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithNodeName(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithNodeName(value string) *ResourceSliceApplyConfiguration {
b.NodeName = &value
return b
}
@@ -251,7 +251,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithNodeName(value string) *NodeRe
// WithDriverName sets the DriverName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DriverName field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithDriverName(value string) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithDriverName(value string) *ResourceSliceApplyConfiguration {
b.DriverName = &value
return b
}
@@ -259,7 +259,7 @@ func (b *NodeResourceSliceApplyConfiguration) WithDriverName(value string) *Node
// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NamedResources field is set to the value of the last call.
func (b *NodeResourceSliceApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *NodeResourceSliceApplyConfiguration {
func (b *ResourceSliceApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *ResourceSliceApplyConfiguration {
b.NamedResources = value
return b
}
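The renamed builder slots into the usual server-side apply flow. A minimal sketch, assuming a pre-built clientset; the helper name, the slice naming scheme, and the "dra-driver" field manager are illustrative:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
)

// applySlice declares the desired state of one ResourceSlice; the server
// reconciles it and tracks ownership under the given field manager.
func applySlice(ctx context.Context, cs kubernetes.Interface, nodeName, driverName string) error {
	slice := resourcev1alpha2.ResourceSlice(nodeName + "-" + driverName).
		WithNodeName(nodeName).
		WithDriverName(driverName)
	_, err := cs.ResourceV1alpha2().ResourceSlices().Apply(ctx, slice,
		metav1.ApplyOptions{FieldManager: "dra-driver", Force: true})
	return err
}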
@@ -1549,8 +1549,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &resourcev1alpha2.NamedResourcesStringSliceApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("NodeResourceModel"):
return &resourcev1alpha2.NodeResourceModelApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("NodeResourceSlice"):
return &resourcev1alpha2.NodeResourceSliceApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext"):
return &resourcev1alpha2.PodSchedulingContextApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContextSpec"):
@@ -1591,6 +1589,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &resourcev1alpha2.ResourceRequestApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("ResourceRequestModel"):
return &resourcev1alpha2.ResourceRequestModelApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("ResourceSlice"):
return &resourcev1alpha2.ResourceSliceApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("StructuredResourceHandle"):
return &resourcev1alpha2.StructuredResourceHandleApplyConfiguration{}
case v1alpha2.SchemeGroupVersion.WithKind("VendorParameters"):
@@ -362,8 +362,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil
// Group=resource.k8s.io, Version=v1alpha2
case v1alpha2.SchemeGroupVersion.WithResource("noderesourceslices"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().NodeResourceSlices().Informer()}, nil
case v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulingContexts().Informer()}, nil
case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"):
@@ -376,6 +374,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClasses().Informer()}, nil
case v1alpha2.SchemeGroupVersion.WithResource("resourceclassparameters"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClassParameters().Informer()}, nil
case v1alpha2.SchemeGroupVersion.WithResource("resourceslices"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceSlices().Informer()}, nil
// Group=scheduling.k8s.io, Version=v1
case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"):
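The generic factory path resolves the new plural name the same way; a sketch for callers that only know the GroupVersionResource (the helper name is illustrative):

import (
	v1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// resourceSliceGenericInformer looks the shared informer up by resource
// name rather than through the typed accessor chain.
func resourceSliceGenericInformer(cs kubernetes.Interface) (informers.GenericInformer, error) {
	factory := informers.NewSharedInformerFactory(cs, 0)
	return factory.ForResource(v1alpha2.SchemeGroupVersion.WithResource("resourceslices"))
}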
@@ -24,8 +24,6 @@ import (
// Interface provides access to all the informers in this group version.
type Interface interface {
// NodeResourceSlices returns a NodeResourceSliceInformer.
NodeResourceSlices() NodeResourceSliceInformer
// PodSchedulingContexts returns a PodSchedulingContextInformer.
PodSchedulingContexts() PodSchedulingContextInformer
// ResourceClaims returns a ResourceClaimInformer.
@@ -38,6 +36,8 @@ type Interface interface {
ResourceClasses() ResourceClassInformer
// ResourceClassParameters returns a ResourceClassParametersInformer.
ResourceClassParameters() ResourceClassParametersInformer
// ResourceSlices returns a ResourceSliceInformer.
ResourceSlices() ResourceSliceInformer
}
type version struct {
@@ -51,11 +51,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// NodeResourceSlices returns a NodeResourceSliceInformer.
func (v *version) NodeResourceSlices() NodeResourceSliceInformer {
return &nodeResourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
// PodSchedulingContexts returns a PodSchedulingContextInformer.
func (v *version) PodSchedulingContexts() PodSchedulingContextInformer {
return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
@@ -85,3 +80,8 @@ func (v *version) ResourceClasses() ResourceClassInformer {
func (v *version) ResourceClassParameters() ResourceClassParametersInformer {
return &resourceClassParametersInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// ResourceSlices returns a ResourceSliceInformer.
func (v *version) ResourceSlices() ResourceSliceInformer {
return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
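Typed access now chains through ResourceSlices(); a minimal sketch wiring an add handler (the function name and handler body are illustrative):

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchSlices starts a shared informer and reports every ResourceSlice add.
func watchSlices(cs kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(cs, 0)
	informer := factory.Resource().V1alpha2().ResourceSlices().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			slice := obj.(*resourcev1alpha2.ResourceSlice)
			fmt.Println("slice", slice.Name, "for node", slice.NodeName)
		},
	})
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}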
@@ -32,58 +32,58 @@ import (
cache "k8s.io/client-go/tools/cache"
)
// NodeResourceSliceInformer provides access to a shared informer and lister for
// NodeResourceSlices.
type NodeResourceSliceInformer interface {
// ResourceSliceInformer provides access to a shared informer and lister for
// ResourceSlices.
type ResourceSliceInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha2.NodeResourceSliceLister
Lister() v1alpha2.ResourceSliceLister
}
type nodeResourceSliceInformer struct {
type resourceSliceInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// NewNodeResourceSliceInformer constructs a new informer for NodeResourceSlice type.
// NewResourceSliceInformer constructs a new informer for ResourceSlice type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewNodeResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredNodeResourceSliceInformer(client, resyncPeriod, indexers, nil)
func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredResourceSliceInformer(client, resyncPeriod, indexers, nil)
}
// NewFilteredNodeResourceSliceInformer constructs a new informer for NodeResourceSlice type.
// NewFilteredResourceSliceInformer constructs a new informer for ResourceSlice type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredNodeResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ResourceV1alpha2().NodeResourceSlices().List(context.TODO(), options)
return client.ResourceV1alpha2().ResourceSlices().List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ResourceV1alpha2().NodeResourceSlices().Watch(context.TODO(), options)
return client.ResourceV1alpha2().ResourceSlices().Watch(context.TODO(), options)
},
},
&resourcev1alpha2.NodeResourceSlice{},
&resourcev1alpha2.ResourceSlice{},
resyncPeriod,
indexers,
)
}
func (f *nodeResourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredNodeResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *nodeResourceSliceInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&resourcev1alpha2.NodeResourceSlice{}, f.defaultInformer)
func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&resourcev1alpha2.ResourceSlice{}, f.defaultInformer)
}
func (f *nodeResourceSliceInformer) Lister() v1alpha2.NodeResourceSliceLister {
return v1alpha2.NewNodeResourceSliceLister(f.Informer().GetIndexer())
func (f *resourceSliceInformer) Lister() v1alpha2.ResourceSliceLister {
return v1alpha2.NewResourceSliceLister(f.Informer().GetIndexer())
}
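NewFilteredResourceSliceInformer is the hook for scoping the cache; a sketch that mirrors the nodeName field selector used by the e2e tests further down (the helper name is illustrative):

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceinformers "k8s.io/client-go/informers/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// nodeLocalSliceInformer caches only the slices published for one node.
func nodeLocalSliceInformer(cs kubernetes.Interface, nodeName string) cache.SharedIndexInformer {
	return resourceinformers.NewFilteredResourceSliceInformer(cs, 10*time.Minute, cache.Indexers{},
		func(options *metav1.ListOptions) {
			// Restrict the list/watch to slices for this node.
			options.FieldSelector = "nodeName=" + nodeName
		})
}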
@@ -1,145 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
json "encoding/json"
"fmt"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
testing "k8s.io/client-go/testing"
)
// FakeNodeResourceSlices implements NodeResourceSliceInterface
type FakeNodeResourceSlices struct {
Fake *FakeResourceV1alpha2
}
var noderesourceslicesResource = v1alpha2.SchemeGroupVersion.WithResource("noderesourceslices")
var noderesourceslicesKind = v1alpha2.SchemeGroupVersion.WithKind("NodeResourceSlice")
// Get takes name of the nodeResourceSlice, and returns the corresponding nodeResourceSlice object, and an error if there is any.
func (c *FakeNodeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.NodeResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootGetAction(noderesourceslicesResource, name), &v1alpha2.NodeResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.NodeResourceSlice), err
}
// List takes label and field selectors, and returns the list of NodeResourceSlices that match those selectors.
func (c *FakeNodeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.NodeResourceSliceList, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootListAction(noderesourceslicesResource, noderesourceslicesKind, opts), &v1alpha2.NodeResourceSliceList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha2.NodeResourceSliceList{ListMeta: obj.(*v1alpha2.NodeResourceSliceList).ListMeta}
for _, item := range obj.(*v1alpha2.NodeResourceSliceList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested nodeResourceSlices.
func (c *FakeNodeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewRootWatchAction(noderesourceslicesResource, opts))
}
// Create takes the representation of a nodeResourceSlice and creates it. Returns the server's representation of the nodeResourceSlice, and an error, if there is any.
func (c *FakeNodeResourceSlices) Create(ctx context.Context, nodeResourceSlice *v1alpha2.NodeResourceSlice, opts v1.CreateOptions) (result *v1alpha2.NodeResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootCreateAction(noderesourceslicesResource, nodeResourceSlice), &v1alpha2.NodeResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.NodeResourceSlice), err
}
// Update takes the representation of a nodeResourceSlice and updates it. Returns the server's representation of the nodeResourceSlice, and an error, if there is any.
func (c *FakeNodeResourceSlices) Update(ctx context.Context, nodeResourceSlice *v1alpha2.NodeResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.NodeResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateAction(noderesourceslicesResource, nodeResourceSlice), &v1alpha2.NodeResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.NodeResourceSlice), err
}
// Delete takes name of the nodeResourceSlice and deletes it. Returns an error if one occurs.
func (c *FakeNodeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewRootDeleteActionWithOptions(noderesourceslicesResource, name, opts), &v1alpha2.NodeResourceSlice{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeNodeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewRootDeleteCollectionAction(noderesourceslicesResource, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha2.NodeResourceSliceList{})
return err
}
// Patch applies the patch and returns the patched nodeResourceSlice.
func (c *FakeNodeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.NodeResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(noderesourceslicesResource, name, pt, data, subresources...), &v1alpha2.NodeResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.NodeResourceSlice), err
}
// Apply takes the given apply declarative configuration, applies it and returns the applied nodeResourceSlice.
func (c *FakeNodeResourceSlices) Apply(ctx context.Context, nodeResourceSlice *resourcev1alpha2.NodeResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.NodeResourceSlice, err error) {
if nodeResourceSlice == nil {
return nil, fmt.Errorf("nodeResourceSlice provided to Apply must not be nil")
}
data, err := json.Marshal(nodeResourceSlice)
if err != nil {
return nil, err
}
name := nodeResourceSlice.Name
if name == nil {
return nil, fmt.Errorf("nodeResourceSlice.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(noderesourceslicesResource, *name, types.ApplyPatchType, data), &v1alpha2.NodeResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.NodeResourceSlice), err
}
@@ -28,10 +28,6 @@ type FakeResourceV1alpha2 struct {
*testing.Fake
}
func (c *FakeResourceV1alpha2) NodeResourceSlices() v1alpha2.NodeResourceSliceInterface {
return &FakeNodeResourceSlices{c}
}
func (c *FakeResourceV1alpha2) PodSchedulingContexts(namespace string) v1alpha2.PodSchedulingContextInterface {
return &FakePodSchedulingContexts{c, namespace}
}
@@ -56,6 +52,10 @@ func (c *FakeResourceV1alpha2) ResourceClassParameters(namespace string) v1alpha
return &FakeResourceClassParameters{c, namespace}
}
func (c *FakeResourceV1alpha2) ResourceSlices() v1alpha2.ResourceSliceInterface {
return &FakeResourceSlices{c}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeResourceV1alpha2) RESTClient() rest.Interface {
@@ -0,0 +1,145 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
json "encoding/json"
"fmt"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
testing "k8s.io/client-go/testing"
)
// FakeResourceSlices implements ResourceSliceInterface
type FakeResourceSlices struct {
Fake *FakeResourceV1alpha2
}
var resourceslicesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceslices")
var resourceslicesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceSlice")
// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any.
func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootGetAction(resourceslicesResource, name), &v1alpha2.ResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ResourceSlice), err
}
// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors.
func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootListAction(resourceslicesResource, resourceslicesKind, opts), &v1alpha2.ResourceSliceList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha2.ResourceSliceList{ListMeta: obj.(*v1alpha2.ResourceSliceList).ListMeta}
for _, item := range obj.(*v1alpha2.ResourceSliceList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested resourceSlices.
func (c *FakeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewRootWatchAction(resourceslicesResource, opts))
}
// Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any.
func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootCreateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ResourceSlice), err
}
// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any.
func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ResourceSlice), err
}
// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs.
func (c *FakeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha2.ResourceSlice{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewRootDeleteCollectionAction(resourceslicesResource, listOpts)
_, err := c.Fake.Invokes(action, &v1alpha2.ResourceSliceList{})
return err
}
// Patch applies the patch and returns the patched resourceSlice.
func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, name, pt, data, subresources...), &v1alpha2.ResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ResourceSlice), err
}
// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice.
func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) {
if resourceSlice == nil {
return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil")
}
data, err := json.Marshal(resourceSlice)
if err != nil {
return nil, err
}
name := resourceSlice.Name
if name == nil {
return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply")
}
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceSlice{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.ResourceSlice), err
}
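The fake implementation keeps unit tests server-free across the rename; a compact sketch (the test and object names are illustrative):

import (
	"context"
	"testing"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestResourceSliceLifecycle(t *testing.T) {
	cs := fake.NewSimpleClientset()
	slices := cs.ResourceV1alpha2().ResourceSlices()

	// Create goes through the in-memory tracker, no API server involved.
	_, err := slices.Create(context.Background(),
		&resourcev1alpha2.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "node1-driver"}},
		metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("create: %v", err)
	}

	list, err := slices.List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatalf("list: %v", err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("want one slice, got %d", len(list.Items))
	}
}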
@@ -18,8 +18,6 @@ limitations under the License.
package v1alpha2
type NodeResourceSliceExpansion interface{}
type PodSchedulingContextExpansion interface{}
type ResourceClaimExpansion interface{}
@@ -31,3 +29,5 @@ type ResourceClaimTemplateExpansion interface{}
type ResourceClassExpansion interface{}
type ResourceClassParametersExpansion interface{}
type ResourceSliceExpansion interface{}
@@ -1,197 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
import (
"context"
json "encoding/json"
"fmt"
"time"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
// NodeResourceSlicesGetter has a method to return a NodeResourceSliceInterface.
// A group's client should implement this interface.
type NodeResourceSlicesGetter interface {
NodeResourceSlices() NodeResourceSliceInterface
}
// NodeResourceSliceInterface has methods to work with NodeResourceSlice resources.
type NodeResourceSliceInterface interface {
Create(ctx context.Context, nodeResourceSlice *v1alpha2.NodeResourceSlice, opts v1.CreateOptions) (*v1alpha2.NodeResourceSlice, error)
Update(ctx context.Context, nodeResourceSlice *v1alpha2.NodeResourceSlice, opts v1.UpdateOptions) (*v1alpha2.NodeResourceSlice, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.NodeResourceSlice, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.NodeResourceSliceList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.NodeResourceSlice, err error)
Apply(ctx context.Context, nodeResourceSlice *resourcev1alpha2.NodeResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.NodeResourceSlice, err error)
NodeResourceSliceExpansion
}
// nodeResourceSlices implements NodeResourceSliceInterface
type nodeResourceSlices struct {
client rest.Interface
}
// newNodeResourceSlices returns a NodeResourceSlices
func newNodeResourceSlices(c *ResourceV1alpha2Client) *nodeResourceSlices {
return &nodeResourceSlices{
client: c.RESTClient(),
}
}
// Get takes name of the nodeResourceSlice, and returns the corresponding nodeResourceSlice object, and an error if there is any.
func (c *nodeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.NodeResourceSlice, err error) {
result = &v1alpha2.NodeResourceSlice{}
err = c.client.Get().
Resource("noderesourceslices").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of NodeResourceSlices that match those selectors.
func (c *nodeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.NodeResourceSliceList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha2.NodeResourceSliceList{}
err = c.client.Get().
Resource("noderesourceslices").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested nodeResourceSlices.
func (c *nodeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("noderesourceslices").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a nodeResourceSlice and creates it. Returns the server's representation of the nodeResourceSlice, and an error, if there is any.
func (c *nodeResourceSlices) Create(ctx context.Context, nodeResourceSlice *v1alpha2.NodeResourceSlice, opts v1.CreateOptions) (result *v1alpha2.NodeResourceSlice, err error) {
result = &v1alpha2.NodeResourceSlice{}
err = c.client.Post().
Resource("noderesourceslices").
VersionedParams(&opts, scheme.ParameterCodec).
Body(nodeResourceSlice).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a nodeResourceSlice and updates it. Returns the server's representation of the nodeResourceSlice, and an error, if there is any.
func (c *nodeResourceSlices) Update(ctx context.Context, nodeResourceSlice *v1alpha2.NodeResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.NodeResourceSlice, err error) {
result = &v1alpha2.NodeResourceSlice{}
err = c.client.Put().
Resource("noderesourceslices").
Name(nodeResourceSlice.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(nodeResourceSlice).
Do(ctx).
Into(result)
return
}
// Delete takes name of the nodeResourceSlice and deletes it. Returns an error if one occurs.
func (c *nodeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Resource("noderesourceslices").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *nodeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("noderesourceslices").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched nodeResourceSlice.
func (c *nodeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.NodeResourceSlice, err error) {
result = &v1alpha2.NodeResourceSlice{}
err = c.client.Patch(pt).
Resource("noderesourceslices").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
// Apply takes the given apply declarative configuration, applies it and returns the applied nodeResourceSlice.
func (c *nodeResourceSlices) Apply(ctx context.Context, nodeResourceSlice *resourcev1alpha2.NodeResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.NodeResourceSlice, err error) {
if nodeResourceSlice == nil {
return nil, fmt.Errorf("nodeResourceSlice provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(nodeResourceSlice)
if err != nil {
return nil, err
}
name := nodeResourceSlice.Name
if name == nil {
return nil, fmt.Errorf("nodeResourceSlice.Name must be provided to Apply")
}
result = &v1alpha2.NodeResourceSlice{}
err = c.client.Patch(types.ApplyPatchType).
Resource("noderesourceslices").
Name(*name).
VersionedParams(&patchOpts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
@@ -28,13 +28,13 @@ import (
type ResourceV1alpha2Interface interface {
RESTClient() rest.Interface
NodeResourceSlicesGetter
PodSchedulingContextsGetter
ResourceClaimsGetter
ResourceClaimParametersGetter
ResourceClaimTemplatesGetter
ResourceClassesGetter
ResourceClassParametersGetter
ResourceSlicesGetter
}
// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group.
@@ -42,10 +42,6 @@ type ResourceV1alpha2Client struct {
restClient rest.Interface
}
func (c *ResourceV1alpha2Client) NodeResourceSlices() NodeResourceSliceInterface {
return newNodeResourceSlices(c)
}
func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface {
return newPodSchedulingContexts(c, namespace)
}
@@ -70,6 +66,10 @@ func (c *ResourceV1alpha2Client) ResourceClassParameters(namespace string) Resou
return newResourceClassParameters(c, namespace)
}
func (c *ResourceV1alpha2Client) ResourceSlices() ResourceSliceInterface {
return newResourceSlices(c)
}
// NewForConfig creates a new ResourceV1alpha2Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
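The per-group client is also reachable directly; a sketch building the getter from a rest.Config (the helper name is illustrative):

import (
	resourceclient "k8s.io/client-go/kubernetes/typed/resource/v1alpha2"
	"k8s.io/client-go/rest"
)

// newSliceGetter returns the cluster-scoped ResourceSlice client.
func newSliceGetter(cfg *rest.Config) (resourceclient.ResourceSliceInterface, error) {
	c, err := resourceclient.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	return c.ResourceSlices(), nil
}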
@@ -0,0 +1,197 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
import (
"context"
json "encoding/json"
"fmt"
"time"
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
// ResourceSlicesGetter has a method to return a ResourceSliceInterface.
// A group's client should implement this interface.
type ResourceSlicesGetter interface {
ResourceSlices() ResourceSliceInterface
}
// ResourceSliceInterface has methods to work with ResourceSlice resources.
type ResourceSliceInterface interface {
Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (*v1alpha2.ResourceSlice, error)
Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (*v1alpha2.ResourceSlice, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceSlice, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceSliceList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error)
Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error)
ResourceSliceExpansion
}
// resourceSlices implements ResourceSliceInterface
type resourceSlices struct {
client rest.Interface
}
// newResourceSlices returns a ResourceSlices
func newResourceSlices(c *ResourceV1alpha2Client) *resourceSlices {
return &resourceSlices{
client: c.RESTClient(),
}
}
// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any.
func (c *resourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) {
result = &v1alpha2.ResourceSlice{}
err = c.client.Get().
Resource("resourceslices").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors.
func (c *resourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha2.ResourceSliceList{}
err = c.client.Get().
Resource("resourceslices").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested resourceSlices.
func (c *resourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("resourceslices").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any.
func (c *resourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) {
result = &v1alpha2.ResourceSlice{}
err = c.client.Post().
Resource("resourceslices").
VersionedParams(&opts, scheme.ParameterCodec).
Body(resourceSlice).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any.
func (c *resourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) {
result = &v1alpha2.ResourceSlice{}
err = c.client.Put().
Resource("resourceslices").
Name(resourceSlice.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(resourceSlice).
Do(ctx).
Into(result)
return
}
// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs.
func (c *resourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Resource("resourceslices").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *resourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("resourceslices").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched resourceSlice.
func (c *resourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) {
result = &v1alpha2.ResourceSlice{}
err = c.client.Patch(pt).
Resource("resourceslices").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice.
func (c *resourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) {
if resourceSlice == nil {
return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(resourceSlice)
if err != nil {
return nil, err
}
name := resourceSlice.Name
if name == nil {
return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply")
}
result = &v1alpha2.ResourceSlice{}
err = c.client.Patch(types.ApplyPatchType).
Resource("resourceslices").
Name(*name).
VersionedParams(&patchOpts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
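In use, the typed client reads like any other cluster-scoped resource; a sketch listing one driver's slices on one node, with the same field selector syntax the e2e tests below use (the helper name is illustrative):

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceclient "k8s.io/client-go/kubernetes/typed/resource/v1alpha2"
)

// listNodeSlices prints the slices a driver published for one node.
func listNodeSlices(ctx context.Context, c resourceclient.ResourceV1alpha2Interface, nodeName, driverName string) error {
	list, err := c.ResourceSlices().List(ctx, metav1.ListOptions{
		FieldSelector: fmt.Sprintf("nodeName=%s,driverName=%s", nodeName, driverName),
	})
	if err != nil {
		return err
	}
	for _, slice := range list.Items {
		fmt.Println(slice.Name)
	}
	return nil
}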
@@ -18,10 +18,6 @@ limitations under the License.
package v1alpha2
// NodeResourceSliceListerExpansion allows custom methods to be added to
// NodeResourceSliceLister.
type NodeResourceSliceListerExpansion interface{}
// PodSchedulingContextListerExpansion allows custom methods to be added to
// PodSchedulingContextLister.
type PodSchedulingContextListerExpansion interface{}
@@ -65,3 +61,7 @@ type ResourceClassParametersListerExpansion interface{}
// ResourceClassParametersNamespaceListerExpansion allows custom methods to be added to
// ResourceClassParametersNamespaceLister.
type ResourceClassParametersNamespaceListerExpansion interface{}
// ResourceSliceListerExpansion allows custom methods to be added to
// ResourceSliceLister.
type ResourceSliceListerExpansion interface{}
@@ -1,68 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
import (
v1alpha2 "k8s.io/api/resource/v1alpha2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// NodeResourceSliceLister helps list NodeResourceSlices.
// All objects returned here must be treated as read-only.
type NodeResourceSliceLister interface {
// List lists all NodeResourceSlices in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha2.NodeResourceSlice, err error)
// Get retrieves the NodeResourceSlice from the index for a given name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha2.NodeResourceSlice, error)
NodeResourceSliceListerExpansion
}
// nodeResourceSliceLister implements the NodeResourceSliceLister interface.
type nodeResourceSliceLister struct {
indexer cache.Indexer
}
// NewNodeResourceSliceLister returns a new NodeResourceSliceLister.
func NewNodeResourceSliceLister(indexer cache.Indexer) NodeResourceSliceLister {
return &nodeResourceSliceLister{indexer: indexer}
}
// List lists all NodeResourceSlices in the indexer.
func (s *nodeResourceSliceLister) List(selector labels.Selector) (ret []*v1alpha2.NodeResourceSlice, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.NodeResourceSlice))
})
return ret, err
}
// Get retrieves the NodeResourceSlice from the index for a given name.
func (s *nodeResourceSliceLister) Get(name string) (*v1alpha2.NodeResourceSlice, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha2.Resource("noderesourceslice"), name)
}
return obj.(*v1alpha2.NodeResourceSlice), nil
}
@@ -0,0 +1,68 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by lister-gen. DO NOT EDIT.

package v1alpha2

import (
	v1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// ResourceSliceLister helps list ResourceSlices.
// All objects returned here must be treated as read-only.
type ResourceSliceLister interface {
	// List lists all ResourceSlices in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*v1alpha2.ResourceSlice, err error)
	// Get retrieves the ResourceSlice from the index for a given name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*v1alpha2.ResourceSlice, error)
	ResourceSliceListerExpansion
}

// resourceSliceLister implements the ResourceSliceLister interface.
type resourceSliceLister struct {
	indexer cache.Indexer
}

// NewResourceSliceLister returns a new ResourceSliceLister.
func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister {
	return &resourceSliceLister{indexer: indexer}
}

// List lists all ResourceSlices in the indexer.
func (s *resourceSliceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceSlice, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1alpha2.ResourceSlice))
	})
	return ret, err
}

// Get retrieves the ResourceSlice from the index for a given name.
func (s *resourceSliceLister) Get(name string) (*v1alpha2.ResourceSlice, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1alpha2.Resource("resourceslice"), name)
	}
	return obj.(*v1alpha2.ResourceSlice), nil
}
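
As a usage sketch (not part of this commit), the generated lister is normally consumed through a shared informer factory, which keeps the lister's cache.Indexer in sync with the API server. The kubeconfig location and resync interval below are illustrative assumptions; this requires a client-go release that contains the rename:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig in the default location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// Requesting the lister registers the ResourceSlice informer with the factory.
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	lister := factory.Resource().V1alpha2().ResourceSlices().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Objects come straight from the cache; per the lister contract they
	// must be treated as read-only.
	slices, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, slice := range slices {
		fmt.Printf("%s: node %q, driver %q\n", slice.Name, slice.NodeName, slice.DriverName)
	}
}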

View File

@@ -362,8 +362,8 @@ func (d *Driver) TearDown() {
}
func (d *Driver) IsGone(ctx context.Context) {
gomega.Eventually(ctx, func(ctx context.Context) ([]resourcev1alpha2.NodeResourceSlice, error) {
slices, err := d.f.ClientSet.ResourceV1alpha2().NodeResourceSlices().List(ctx, metav1.ListOptions{FieldSelector: "driverName=" + d.Name})
gomega.Eventually(ctx, func(ctx context.Context) ([]resourcev1alpha2.ResourceSlice, error) {
slices, err := d.f.ClientSet.ResourceV1alpha2().ResourceSlices().List(ctx, metav1.ListOptions{FieldSelector: "driverName=" + d.Name})
if err != nil {
return nil, err
}
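
The check above relies on the driverName field selector that the API server supports for ResourceSlice. Outside the e2e framework, the same "wait until the driver's slices are gone" pattern could look like this hypothetical helper (timeouts are illustrative; assumes the usual imports: context, time, metav1, k8s.io/apimachinery/pkg/util/wait, k8s.io/client-go/kubernetes):

// waitForSlicesGone polls until no ResourceSlice published by the given
// driver remains, e.g. after its kubelet plugin was torn down.
func waitForSlicesGone(ctx context.Context, clientset kubernetes.Interface, driverName string) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false,
		func(ctx context.Context) (bool, error) {
			slices, err := clientset.ResourceV1alpha2().ResourceSlices().List(ctx,
				metav1.ListOptions{FieldSelector: "driverName=" + driverName})
			if err != nil {
				return false, err
			}
			return len(slices.Items) == 0, nil
		})
}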

View File

@@ -203,7 +203,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
driver := NewDriver(f, nodes, perNode(1, nodes))
driver.parameterMode = parameterModeStructured
f.It("must manage NodeResourceSlice", f.WithSlow(), func(ctx context.Context) {
f.It("must manage ResourceSlice", f.WithSlow(), func(ctx context.Context) {
nodeName := nodes.NodeNames[0]
driverName := driver.Name
m := MethodInstance{nodeName, NodeListAndWatchResourcesMethod}
@@ -212,8 +212,8 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
return driver.CallCount(m)
}).WithTimeout(podStartTimeout).Should(gomega.BeNumerically(">", int64(0)), "NodeListAndWatchResources call count")
ginkgo.By("check if NodeResourceSlice object exists on the API server")
resourceClient := f.ClientSet.ResourceV1alpha2().NodeResourceSlices()
ginkgo.By("check if ResourceSlice object exists on the API server")
resourceClient := f.ClientSet.ResourceV1alpha2().ResourceSlices()
matchSlices := gomega.And(
gomega.HaveLen(1),
gomega.ContainElement(gstruct.MatchAllFields(gstruct.Fields{
@@ -226,7 +226,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
}}),
})),
)
getSlices := func(ctx context.Context) ([]resourcev1alpha2.NodeResourceSlice, error) {
getSlices := func(ctx context.Context) ([]resourcev1alpha2.ResourceSlice, error) {
slices, err := resourceClient.List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("nodeName=%s,driverName=%s", nodeName, driverName)})
if err != nil {
return nil, err
@@ -1463,10 +1463,10 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) []klog.KMe
createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceClassParameters(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
case *resourcev1alpha2.ResourceClaimParameters:
createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaimParameters(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
case *resourcev1alpha2.NodeResourceSlice:
createdObj, err = b.f.ClientSet.ResourceV1alpha2().NodeResourceSlices().Create(ctx, obj, metav1.CreateOptions{})
case *resourcev1alpha2.ResourceSlice:
createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceSlices().Create(ctx, obj, metav1.CreateOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := b.f.ClientSet.ResourceV1alpha2().NodeResourceSlices().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
err := b.f.ClientSet.ResourceV1alpha2().ResourceSlices().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "delete node resource slice")
})
default:

View File

@@ -454,9 +454,9 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
Stub: `{"metadata": {"name": "claim1parameters"}}`,
ExpectedEtcdPath: "/registry/resourceclaimparameters/" + namespace + "/claim1parameters",
},
gvr("resource.k8s.io", "v1alpha2", "noderesourceslices"): {
gvr("resource.k8s.io", "v1alpha2", "resourceslices"): {
Stub: `{"metadata": {"name": "node1slice"}, "nodeName": "worker1", "driverName": "dra.example.com", "namedResources": {}}`,
ExpectedEtcdPath: "/registry/noderesourceslices/node1slice",
ExpectedEtcdPath: "/registry/resourceslices/node1slice",
},
// --
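
The stub above is a minimal ResourceSlice using the namedResources model. Built with the Go client instead of raw JSON, an equivalent object might look like this sketch (clientset and ctx setup omitted; the empty NamedResourcesResources matches the stub's "namedResources": {}):

slice := &resourcev1alpha2.ResourceSlice{
	ObjectMeta: metav1.ObjectMeta{Name: "node1slice"},
	// NodeName is optional in general after this commit, but the kubelet
	// must set it to its own node name to pass authorization.
	NodeName:   "worker1",
	DriverName: "dra.example.com",
	ResourceModel: resourcev1alpha2.ResourceModel{
		NamedResources: &resourcev1alpha2.NamedResourcesResources{},
	},
}
created, err := clientset.ResourceV1alpha2().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})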

View File

@@ -128,7 +128,7 @@ type createResourceDriverOp struct {
Nodes string
// StructuredParameters is true if the controller that is built into the scheduler
// is used and the control-plane controller is not needed.
// Because we don't run the kubelet plugin, NodeResourceSlices must
// Because we don't run the kubelet plugin, ResourceSlices must
// get created for all nodes.
StructuredParameters bool
}
@@ -195,12 +195,12 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
if op.StructuredParameters {
for _, nodeName := range resources.Nodes {
slice := nodeResourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
_, err := tCtx.Client().ResourceV1alpha2().NodeResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
_, err := tCtx.Client().ResourceV1alpha2().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
tCtx.ExpectNoError(err, "create node resource slice")
}
tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
err := tCtx.Client().ResourceV1alpha2().NodeResourceSlices().DeleteCollection(tCtx,
err := tCtx.Client().ResourceV1alpha2().ResourceSlices().DeleteCollection(tCtx,
metav1.DeleteOptions{},
metav1.ListOptions{FieldSelector: "driverName=" + op.DriverName},
)
@@ -228,8 +228,8 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
})
}
func nodeResourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.NodeResourceSlice {
slice := &resourcev1alpha2.NodeResourceSlice{
func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.ResourceSlice {
slice := &resourcev1alpha2.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},