DRA: bump API v1alpha2 -> v1alpha3

This is in preparation for revamping the resource.k8s.io API group completely. Because
there will be no support for transitioning from v1alpha2 to v1alpha3, the
roundtrip test data for that API in 1.29 and 1.30 gets removed.

Repeating the version in the import name of the API packages is not really
required. It was done for a while to support simpler grepping for usage of
alpha APIs, but there are better ways for that now. So during this transition,
"resourceapi" gets used instead of "resourcev1alpha3" and the version gets
dropped from informer and lister imports. The advantage is that the next bump
to v1beta1 will affect fewer source code lines.

Only source code where the version really matters (like API registration)
retains the versioned import.
This commit is contained in:
Patrick Ohly
2024-06-14 12:40:48 +02:00
parent 815efa2baa
commit b51d68bb87
269 changed files with 5226 additions and 6934 deletions

View File

@@ -22,7 +22,7 @@ import (
"sync"
v1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
@@ -39,10 +39,10 @@ type ResourceModels struct {
NamedResources namedresourcesmodel.Model
}
// resourceSliceLister is the subset of resourcev1alpha2listers.ResourceSliceLister needed by
// resourceSliceLister is the subset of resourcelisters.ResourceSliceLister needed by
// newResourceModel.
type resourceSliceLister interface {
List(selector labels.Selector) (ret []*resourcev1alpha2.ResourceSlice, err error)
List(selector labels.Selector) (ret []*resourceapi.ResourceSlice, err error)
}
// assumeCacheLister is the subset of volumebinding.AssumeCache needed by newResourceModel.
@@ -72,14 +72,14 @@ func newResourceModel(logger klog.Logger, resourceSliceLister resourceSliceListe
objs := claimAssumeCache.List(nil)
for _, obj := range objs {
claim, ok := obj.(*resourcev1alpha2.ResourceClaim)
claim, ok := obj.(*resourceapi.ResourceClaim)
if !ok {
return nil, fmt.Errorf("got unexpected object of type %T from claim assume cache", obj)
}
if obj, ok := inFlightAllocations.Load(claim.UID); ok {
// If the allocation is in-flight, then we have to use the allocation
// from that claim.
claim = obj.(*resourcev1alpha2.ResourceClaim)
claim = obj.(*resourceapi.ResourceClaim)
}
if claim.Status.Allocation == nil {
continue
@@ -103,13 +103,13 @@ func newResourceModel(logger klog.Logger, resourceSliceLister resourceSliceListe
return model, nil
}
func newClaimController(logger klog.Logger, class *resourcev1alpha2.ResourceClass, classParameters *resourcev1alpha2.ResourceClassParameters, claimParameters *resourcev1alpha2.ResourceClaimParameters) (*claimController, error) {
func newClaimController(logger klog.Logger, class *resourceapi.ResourceClass, classParameters *resourceapi.ResourceClassParameters, claimParameters *resourceapi.ResourceClaimParameters) (*claimController, error) {
// Each node driver is separate from the others. Each driver may have
// multiple requests which need to be allocated together, so here
// we have to collect them per model.
type perDriverRequests struct {
parameters []runtime.RawExtension
requests []*resourcev1alpha2.NamedResourcesRequest
requests []*resourceapi.NamedResourcesRequest
}
namedresourcesRequests := make(map[string]perDriverRequests)
for i, request := range claimParameters.DriverRequests {
@@ -136,7 +136,7 @@ func newClaimController(logger klog.Logger, class *resourcev1alpha2.ResourceClas
namedresources: make(map[string]perDriverController, len(namedresourcesRequests)),
}
for driverName, perDriver := range namedresourcesRequests {
var filter *resourcev1alpha2.NamedResourcesFilter
var filter *resourceapi.NamedResourcesFilter
for _, f := range classParameters.Filters {
if f.DriverName == driverName && f.ResourceFilterModel.NamedResources != nil {
filter = f.ResourceFilterModel.NamedResources
@@ -158,9 +158,9 @@ func newClaimController(logger klog.Logger, class *resourcev1alpha2.ResourceClas
// claimController currently wraps exactly one structured parameter model.
type claimController struct {
class *resourcev1alpha2.ResourceClass
classParameters *resourcev1alpha2.ResourceClassParameters
claimParameters *resourcev1alpha2.ResourceClaimParameters
class *resourceapi.ResourceClass
classParameters *resourceapi.ResourceClassParameters
claimParameters *resourceapi.ResourceClaimParameters
namedresources map[string]perDriverController
}
@@ -186,8 +186,8 @@ func (c claimController) nodeIsSuitable(ctx context.Context, nodeName string, re
return true, nil
}
func (c claimController) allocate(ctx context.Context, nodeName string, resources resources) (string, *resourcev1alpha2.AllocationResult, error) {
allocation := &resourcev1alpha2.AllocationResult{
func (c claimController) allocate(ctx context.Context, nodeName string, resources resources) (string, *resourceapi.AllocationResult, error) {
allocation := &resourceapi.AllocationResult{
Shareable: c.claimParameters.Shareable,
AvailableOnNodes: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
@@ -208,9 +208,9 @@ func (c claimController) allocate(ctx context.Context, nodeName string, resource
if err != nil {
return "", nil, fmt.Errorf("allocating via named resources structured model: %w", err)
}
handle := resourcev1alpha2.ResourceHandle{
handle := resourceapi.ResourceHandle{
DriverName: driverName,
StructuredData: &resourcev1alpha2.StructuredResourceHandle{
StructuredData: &resourceapi.StructuredResourceHandle{
NodeName: nodeName,
},
}
@@ -219,9 +219,9 @@ func (c claimController) allocate(ctx context.Context, nodeName string, resource
continue
}
handle.StructuredData.Results = append(handle.StructuredData.Results,
resourcev1alpha2.DriverAllocationResult{
resourceapi.DriverAllocationResult{
VendorRequestParameters: perDriver.parameters[i],
AllocationResultModel: resourcev1alpha2.AllocationResultModel{
AllocationResultModel: resourceapi.AllocationResultModel{
NamedResources: result,
},
},