DRA: bump API v1alpha2 -> v1alpha3

This is in preparation for revamping the resource.k8s.io API group completely. Because
there will be no support for transitioning from v1alpha2 to v1alpha3, the
roundtrip test data for that API in 1.29 and 1.30 gets removed.

Repeating the version in the import name of the API packages is not really
required. It was done for a while to make grepping for usage of alpha APIs
simpler, but there are better ways to do that now. So during this transition,
"resourceapi" gets used instead of "resourcev1alpha3" and the version gets
dropped from informer and lister import names. The advantage is that the next
bump to v1beta1 will affect fewer source code lines.

Only source code where the version really matters (like API registration)
retains the versioned import.
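
To make the naming scheme concrete, here is a minimal sketch (not a file from
this commit) of how call sites look after the bump: only the import path and
the typed-client accessor mention v1alpha3, while the code itself refers to
"resourceapi". The getClaim helper is hypothetical; the package path and the
ResourceV1alpha3() client group are the ones introduced by this change.

package example

import (
	"context"

	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getClaim fetches a ResourceClaim. Call sites refer to resourceapi.ResourceClaim,
// so a later bump to v1beta1 only has to touch the import path and the
// ResourceV1alpha3() accessor, not every use of the type.
func getClaim(ctx context.Context, client kubernetes.Interface, namespace, name string) (*resourceapi.ResourceClaim, error) {
	return client.ResourceV1alpha3().ResourceClaims(namespace).Get(ctx, name, metav1.GetOptions{})
}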
Author: Patrick Ohly
Date: 2024-06-14 12:40:48 +02:00
Parent: 815efa2baa
Commit: b51d68bb87
269 changed files with 5226 additions and 6934 deletions


@@ -20,7 +20,7 @@ import (
"fmt"
"sync"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/cm/dra/state"
@@ -47,14 +47,14 @@ type claimInfoCache struct {
}
// newClaimInfoFromClaim creates a new claim info from a resource claim.
- func newClaimInfoFromClaim(claim *resourcev1alpha2.ResourceClaim) *ClaimInfo {
+ func newClaimInfoFromClaim(claim *resourceapi.ResourceClaim) *ClaimInfo {
// Grab the allocation.resourceHandles. If there are no
// allocation.resourceHandles, create a single resourceHandle with no
// content. This will trigger processing of this claim by a single
// kubelet plugin whose name matches resourceClaim.Status.DriverName.
resourceHandles := claim.Status.Allocation.ResourceHandles
if len(resourceHandles) == 0 {
- resourceHandles = make([]resourcev1alpha2.ResourceHandle, 1)
+ resourceHandles = make([]resourceapi.ResourceHandle, 1)
}
claimInfoState := state.ClaimInfoState{
DriverName: claim.Status.DriverName,


@@ -25,7 +25,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
@@ -44,24 +44,24 @@ func TestNewClaimInfoFromClaim(t *testing.T) {
for _, test := range []struct {
description string
- claim *resourcev1alpha2.ResourceClaim
+ claim *resourceapi.ResourceClaim
expectedResult *ClaimInfo
}{
{
description: "successfully created object",
claim: &resourcev1alpha2.ResourceClaim{
claim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
UID: claimUID,
Name: claimName,
Namespace: namespace,
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{},
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{},
},
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: className,
},
},
@@ -73,7 +73,7 @@ func TestNewClaimInfoFromClaim(t *testing.T) {
ClaimName: claimName,
Namespace: claimName,
PodUIDs: sets.New[string](),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{},
},
CDIDevices: make(map[string][]string),
@@ -82,17 +82,17 @@ func TestNewClaimInfoFromClaim(t *testing.T) {
},
{
description: "successfully created object with empty allocation",
claim: &resourcev1alpha2.ResourceClaim{
claim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
UID: claimUID,
Name: claimName,
Namespace: namespace,
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{},
Allocation: &resourceapi.AllocationResult{},
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: className,
},
},
@@ -104,7 +104,7 @@ func TestNewClaimInfoFromClaim(t *testing.T) {
ClaimName: claimName,
Namespace: claimName,
PodUIDs: sets.New[string](),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{},
},
CDIDevices: make(map[string][]string),
@@ -136,7 +136,7 @@ func TestNewClaimInfoFromState(t *testing.T) {
ClaimName: "test-claim",
Namespace: "test-namespace",
PodUIDs: sets.New[string]("test-pod-uid"),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{},
+ ResourceHandles: []resourceapi.ResourceHandle{},
CDIDevices: map[string][]string{},
},
},


@@ -22,7 +22,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
@@ -165,7 +165,7 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
continue
}
// Query claim object from the API server
- resourceClaim, err := m.kubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Get(
+ resourceClaim, err := m.kubeClient.ResourceV1alpha3().ResourceClaims(pod.Namespace).Get(
context.TODO(),
*claimName,
metav1.GetOptions{})


@@ -30,7 +30,7 @@ import (
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
v1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
@@ -344,7 +344,7 @@ func TestPrepareResources(t *testing.T) {
driverName string
pod *v1.Pod
claimInfo *ClaimInfo
- resourceClaim *resourcev1alpha2.ResourceClaim
+ resourceClaim *resourceapi.ResourceClaim
resp *drapb.NodePrepareResourcesResponse
wantErr bool
wantTimeout bool
@@ -408,23 +408,23 @@ func TestPrepareResources(t *testing.T) {
},
},
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-1",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -463,23 +463,23 @@ func TestPrepareResources(t *testing.T) {
},
},
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-nil",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -520,23 +520,23 @@ func TestPrepareResources(t *testing.T) {
},
},
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-empty",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -566,19 +566,19 @@ func TestPrepareResources(t *testing.T) {
},
},
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-2",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
@@ -607,23 +607,23 @@ func TestPrepareResources(t *testing.T) {
},
},
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-3",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -671,23 +671,23 @@ func TestPrepareResources(t *testing.T) {
},
prepared: true,
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-4",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -727,23 +727,23 @@ func TestPrepareResources(t *testing.T) {
},
},
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-5",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -789,23 +789,23 @@ func TestPrepareResources(t *testing.T) {
},
},
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim-6",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -858,28 +858,28 @@ func TestPrepareResources(t *testing.T) {
ClaimUID: "test-reserved",
Namespace: "test-namespace",
PodUIDs: sets.Set[string]{"test-reserved": sets.Empty{}},
- ResourceHandles: []resourcev1alpha2.ResourceHandle{{Data: "test-data", DriverName: driverName}},
+ ResourceHandles: []resourceapi.ResourceHandle{{Data: "test-data", DriverName: driverName}},
},
annotations: make(map[string][]kubecontainer.Annotation),
prepared: false,
},
resourceClaim: &resourcev1alpha2.ResourceClaim{
resourceClaim: &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod-claim",
Namespace: "test-namespace",
UID: "test-reserved",
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: "test-reserved"},
},
},
@@ -905,7 +905,7 @@ func TestPrepareResources(t *testing.T) {
}
if test.resourceClaim != nil {
- if _, err := fakeKubeClient.ResourceV1alpha2().ResourceClaims(test.pod.Namespace).Create(context.Background(), test.resourceClaim, metav1.CreateOptions{}); err != nil {
+ if _, err := fakeKubeClient.ResourceV1alpha3().ResourceClaims(test.pod.Namespace).Create(context.Background(), test.resourceClaim, metav1.CreateOptions{}); err != nil {
t.Fatalf("failed to create ResourceClaim %s: %+v", test.resourceClaim.Name, err)
}
}
@@ -1020,7 +1020,7 @@ func TestUnprepareResources(t *testing.T) {
DriverName: driverName,
ClaimName: "another-claim-test",
Namespace: "test-namespace",
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: driverName,
Data: "test data",
@@ -1109,7 +1109,7 @@ func TestUnprepareResources(t *testing.T) {
DriverName: driverName,
ClaimName: "test-pod-claim-2",
Namespace: "test-namespace",
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: driverName,
Data: "test data",
@@ -1159,7 +1159,7 @@ func TestUnprepareResources(t *testing.T) {
DriverName: driverName,
ClaimName: "test-pod-claim-3",
Namespace: "test-namespace",
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: driverName,
Data: "test data",
@@ -1208,7 +1208,7 @@ func TestUnprepareResources(t *testing.T) {
DriverName: driverName,
ClaimName: "test-pod-claim",
Namespace: "test-namespace",
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: driverName,
Data: "test data",
@@ -1258,7 +1258,7 @@ func TestUnprepareResources(t *testing.T) {
ClaimName: "test-pod-claim-nil",
Namespace: "test-namespace",
ClaimUID: "test-reserved",
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: driverName,
Data: "test data",
@@ -1508,29 +1508,29 @@ func TestParallelPrepareUnprepareResources(t *testing.T) {
},
},
}
resourceClaim := &resourcev1alpha2.ResourceClaim{
resourceClaim := &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: claimName,
Namespace: nameSpace,
UID: types.UID(fmt.Sprintf("claim-%d", goRoutineNum)),
},
- Spec: resourcev1alpha2.ResourceClaimSpec{
+ Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: "test-class",
},
- Status: resourcev1alpha2.ResourceClaimStatus{
+ Status: resourceapi.ResourceClaimStatus{
DriverName: driverName,
Allocation: &resourcev1alpha2.AllocationResult{
ResourceHandles: []resourcev1alpha2.ResourceHandle{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: []resourceapi.ResourceHandle{
{Data: "test-data", DriverName: driverName},
},
},
- ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
+ ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{UID: podUID},
},
},
}
- if _, err = fakeKubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Create(context.Background(), resourceClaim, metav1.CreateOptions{}); err != nil {
+ if _, err = fakeKubeClient.ResourceV1alpha3().ResourceClaims(pod.Namespace).Create(context.Background(), resourceClaim, metav1.CreateOptions{}); err != nil {
t.Errorf("failed to create ResourceClaim %s: %+v", resourceClaim.Name, err)
return
}


@@ -102,7 +102,7 @@ func (h *RegistrationHandler) wipeResourceSlices(pluginName string) {
fieldSelector["driverName"] = pluginName
}
- err = h.kubeClient.ResourceV1alpha2().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: fieldSelector.String()})
+ err = h.kubeClient.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: fieldSelector.String()})
switch {
case err == nil:
logger.V(3).Info("Deleted ResourceSlices", "fieldSelector", fieldSelector)


@@ -20,7 +20,7 @@ import (
"fmt"
"sync"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
@@ -57,7 +57,7 @@ type ClaimInfoState struct {
PodUIDs sets.Set[string]
// ResourceHandles is a list of opaque resource data for processing by a specific kubelet plugin
- ResourceHandles []resourcev1alpha2.ResourceHandle
+ ResourceHandles []resourceapi.ResourceHandle
// CDIDevices is a map of DriverName --> CDI devices returned by the
// GRPC API call NodePrepareResource


@@ -26,7 +26,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
cmerrors "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
@@ -42,7 +42,7 @@ func assertStateEqual(t *testing.T, restoredState, expectedState ClaimInfoStateL
// TODO (https://github.com/kubernetes/kubernetes/issues/123552): reconsider what data gets stored in checkpoints and whether that is really necessary.
//
// As it stands now, a "v1" checkpoint contains data for types like the resourcev1alpha2.ResourceHandle
// As it stands now, a "v1" checkpoint contains data for types like the resourceapi.ResourceHandle
// which may change over time as new fields get added in a backward-compatible way (not unusual
// for API types). That breaks checksuming with pkg/util/hash because it is based on spew output.
// That output includes those new fields.
@@ -72,7 +72,7 @@ func TestCheckpointGetOrCreate(t *testing.T) {
ClaimName: "example",
Namespace: "default",
PodUIDs: sets.New("139cdb46-f989-4f17-9561-ca10cfb509a6"),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: "test-driver.cdi.k8s.io",
Data: `{"a": "b"}`,
@@ -96,7 +96,7 @@ func TestCheckpointGetOrCreate(t *testing.T) {
ClaimName: "example",
Namespace: "default",
PodUIDs: sets.New("139cdb46-f989-4f17-9561-ca10cfb509a6"),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: "test-driver-1.cdi.k8s.io",
Data: `{"a": "b"}`,
@@ -125,7 +125,7 @@ func TestCheckpointGetOrCreate(t *testing.T) {
ClaimName: "example-1",
Namespace: "default",
PodUIDs: sets.New("139cdb46-f989-4f17-9561-ca10cfb509a6"),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: "test-driver.cdi.k8s.io",
Data: `{"a": "b"}`,
@@ -142,7 +142,7 @@ func TestCheckpointGetOrCreate(t *testing.T) {
ClaimName: "example-2",
Namespace: "default",
PodUIDs: sets.New("139cdb46-f989-4f17-9561-ca10cfb509a6"),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: "test-driver.cdi.k8s.io",
Data: `{"c": "d"}`,
@@ -217,7 +217,7 @@ func TestCheckpointStateStore(t *testing.T) {
ClaimName: "example",
Namespace: "default",
PodUIDs: sets.New("139cdb46-f989-4f17-9561-ca10cfb509a6"),
- ResourceHandles: []resourcev1alpha2.ResourceHandle{
+ ResourceHandles: []resourceapi.ResourceHandle{
{
DriverName: "test-driver.cdi.k8s.io",
Data: `{"a": "b"}`,


@@ -22,7 +22,7 @@ limitations under the License.
package state
import (
v1alpha2 "k8s.io/api/resource/v1alpha2"
v1alpha3 "k8s.io/api/resource/v1alpha3"
sets "k8s.io/apimachinery/pkg/util/sets"
)
@@ -38,7 +38,7 @@ func (in *ClaimInfoState) DeepCopyInto(out *ClaimInfoState) {
}
if in.ResourceHandles != nil {
in, out := &in.ResourceHandles, &out.ResourceHandles
- *out = make([]v1alpha2.ResourceHandle, len(*in))
+ *out = make([]v1alpha3.ResourceHandle, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}