DRA: bump API v1alpha2 -> v1alpha3

This is in preparation for revamping the resource.k8s.io API group completely. Because
there will be no support for transitioning from v1alpha2 to v1alpha3, the
roundtrip test data for that API in 1.29 and 1.30 gets removed.

Repeating the version in the import name of the API packages is not really
required. It was done for a while to make it easier to grep for usage of
alpha APIs, but there are better ways to do that now. So during this transition,
"resourceapi" gets used instead of "resourcev1alpha3" and the version gets
dropped from informer and lister imports. The advantage is that the next bump
to v1beta1 will affect fewer source code lines.

Only source code where the version really matters (like API registration)
retains the versioned import.
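
As a hedged illustration of the aliasing convention described above (this snippet is
not part of the commit; the package and function names are made up), the alias stays
version-free so only the import path changes on the next bump:

package example // hypothetical file, for illustration only

import (
	// The alias deliberately omits the version; bumping to v1beta1 later
	// only requires changing this one import path.
	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newClaim shows the alias in use; call sites never spell out the version.
func newClaim(name, namespace string) *resourceapi.ResourceClaim {
	return &resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}
}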
Author: Patrick Ohly
Date: 2024-06-14 12:40:48 +02:00
Parent: 815efa2baa
Commit: b51d68bb87
269 changed files with 5226 additions and 6934 deletions

View File

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaim
 metadata:
   name: test-claim-{{.Index}}

View File

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaim
 metadata:
   name: test-claim-{{.Index}}

View File

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaimParameters
 metadata:
   name: test-claim-parameters

View File

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaimTemplate
 metadata:
   name: test-claim-template

View File

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaimTemplate
 metadata:
   name: test-claim-template

View File

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClass
 metadata:
   name: test-class

View File

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClass
 metadata:
   name: test-class

View File

@@ -22,7 +22,7 @@ import (
 	"path/filepath"
 	"sync"
 
-	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+	resourceapi "k8s.io/api/resource/v1alpha3"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
@@ -84,7 +84,7 @@ func (op *createResourceClaimsOp) requiredNamespaces() []string {
 func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
 	tCtx.Logf("creating %d claims in namespace %q", op.Count, op.Namespace)
-	var claimTemplate *resourcev1alpha2.ResourceClaim
+	var claimTemplate *resourceapi.ResourceClaim
 	if err := getSpecFromFile(&op.TemplatePath, &claimTemplate); err != nil {
 		tCtx.Fatalf("parsing ResourceClaim %q: %v", op.TemplatePath, err)
 	}
@@ -92,7 +92,7 @@ func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
 	var mutex sync.Mutex
 	create := func(i int) {
 		err := func() error {
-			if _, err := tCtx.Client().ResourceV1alpha2().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
+			if _, err := tCtx.Client().ResourceV1alpha3().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
 				return fmt.Errorf("create claim: %v", err)
 			}
 			return nil
@@ -197,11 +197,11 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
 	if op.StructuredParameters {
 		for _, nodeName := range resources.Nodes {
 			slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
-			_, err := tCtx.Client().ResourceV1alpha2().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
+			_, err := tCtx.Client().ResourceV1alpha3().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
 			tCtx.ExpectNoError(err, "create node resource slice")
 		}
 		tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
-			err := tCtx.Client().ResourceV1alpha2().ResourceSlices().DeleteCollection(tCtx,
+			err := tCtx.Client().ResourceV1alpha3().ResourceSlices().DeleteCollection(tCtx,
 				metav1.DeleteOptions{},
 				metav1.ListOptions{FieldSelector: "driverName=" + op.DriverName},
 			)
@@ -229,8 +229,8 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
 	})
 }
 
-func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.ResourceSlice {
-	slice := &resourcev1alpha2.ResourceSlice{
+func resourceSlice(driverName, nodeName string, capacity int) *resourceapi.ResourceSlice {
+	slice := &resourceapi.ResourceSlice{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: nodeName,
 		},
@@ -238,14 +238,14 @@ func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.
 		NodeName: nodeName,
 		DriverName: driverName,
-		ResourceModel: resourcev1alpha2.ResourceModel{
-			NamedResources: &resourcev1alpha2.NamedResourcesResources{},
+		ResourceModel: resourceapi.ResourceModel{
+			NamedResources: &resourceapi.NamedResourcesResources{},
 		},
 	}
 
 	for i := 0; i < capacity; i++ {
 		slice.ResourceModel.NamedResources.Instances = append(slice.ResourceModel.NamedResources.Instances,
-			resourcev1alpha2.NamedResourcesInstance{
+			resourceapi.NamedResourcesInstance{
 				Name: fmt.Sprintf("instance-%d", i),
 			},
 		)

View File

@@ -87,7 +87,7 @@ func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfig
 	// except for DRA API group when needed.
 	runtimeConfig := []string{"api/alpha=false"}
 	if enabledFeatures[features.DynamicResourceAllocation] {
-		runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha2=true")
+		runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha3=true")
 	}
 	customFlags := []string{
 		// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
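
For context (not part of this diff): entries like those in runtimeConfig above correspond
to the kube-apiserver --runtime-config flag, which takes comma-separated group/version=bool
pairs. A minimal sketch, assuming a hypothetical helper that joins the entries:

package example // illustrative only, not the test framework's actual wiring

import "strings"

// runtimeConfigFlag turns runtime-config entries into the apiserver flag value.
func runtimeConfigFlag(entries []string) string {
	return "--runtime-config=" + strings.Join(entries, ",")
}

// With DynamicResourceAllocation enabled this yields:
//   --runtime-config=api/alpha=false,resource.k8s.io/v1alpha3=true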