DRA: bump API v1alpha2 -> v1alpha3
This is in preparation for completely revamping the resource.k8s.io API group. Because there will be no support for transitioning from v1alpha2 to v1alpha3, the roundtrip test data for that API in 1.29 and 1.30 gets removed.

Repeating the version in the import name of the API packages is not really required. It was done for a while to support simpler grepping for usage of alpha APIs, but there are better ways for that now. So during this transition, "resourceapi" gets used instead of "resourcev1alpha3", and the version gets dropped from informer and lister imports. The advantage is that the next bump to v1beta1 will affect fewer source code lines. Only source code where the version really matters (like API registration) retains the versioned import.
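For illustration, a minimal sketch of the import convention described above; the helper function and its name are hypothetical, only the import path and the "resourceapi" alias come from this commit:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// Version-free alias: a later bump to v1beta1 only needs to touch this line.
	resourceapi "k8s.io/api/resource/v1alpha3"
)

// newClaim is a hypothetical helper showing that call sites stay
// version-agnostic: they refer to resourceapi.ResourceClaim instead of a
// versioned alias such as resourcev1alpha2.ResourceClaim.
func newClaim(name string) *resourceapi.ResourceClaim {
	return &resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}
}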
@@ -59,8 +59,8 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
 gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`,
 gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
 gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
-gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts.
-gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
+gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts.
+gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
 gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
 // standard for []metav1.Condition
 gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
@@ -152,10 +152,10 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
 gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`,
 gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
 gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
-gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`,
-gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`,
-gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
-gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,
+gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`,
+gvr("resource.k8s.io", "v1alpha3", "resourceclasses"): `{"driverName": "other.example.com"}`,
+gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
+gvr("resource.k8s.io", "v1alpha3", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,
 gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{}`,
 gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"paramKind": {"apiVersion": "apps/v1", "kind": "Deployment"}}}`,
 gvr("admissionregistration.k8s.io", "v1beta1", "validatingadmissionpolicies"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"paramKind": {"apiVersion": "apps/v1", "kind": "Deployment"}}}`,

@@ -52,8 +52,8 @@ var statusData = map[schema.GroupVersionResource]string{
 gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`,
 gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
 gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
-gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
-gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
+gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
+gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
 gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
 // standard for []metav1.Condition
 gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,

@@ -27,7 +27,7 @@ import (
 coordination "k8s.io/api/coordination/v1"
 corev1 "k8s.io/api/core/v1"
 policy "k8s.io/api/policy/v1"
-"k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
 storagev1 "k8s.io/api/storage/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
@@ -108,17 +108,17 @@ func TestNodeAuthorizer(t *testing.T) {
 if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}, metav1.CreateOptions{}); err != nil {
 t.Fatal(err)
 }
-if _, err := superuserClient.ResourceV1alpha2().ResourceClaims("ns").Create(context.TODO(), &v1alpha2.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mynamedresourceclaim"}, Spec: v1alpha2.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
+if _, err := superuserClient.ResourceV1alpha3().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mynamedresourceclaim"}, Spec: resourceapi.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
 t.Fatal(err)
 }
-if _, err := superuserClient.ResourceV1alpha2().ResourceClaims("ns").Create(context.TODO(), &v1alpha2.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mytemplatizedresourceclaim"}, Spec: v1alpha2.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
+if _, err := superuserClient.ResourceV1alpha3().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mytemplatizedresourceclaim"}, Spec: resourceapi.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
 t.Fatal(err)
 }
-model := v1alpha2.ResourceModel{NamedResources: &v1alpha2.NamedResourcesResources{}}
-if _, err := superuserClient.ResourceV1alpha2().ResourceSlices().Create(context.TODO(), &v1alpha2.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice1"}, NodeName: "node1", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
+model := resourceapi.ResourceModel{NamedResources: &resourceapi.NamedResourcesResources{}}
+if _, err := superuserClient.ResourceV1alpha3().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice1"}, NodeName: "node1", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
 t.Fatal(err)
 }
-if _, err := superuserClient.ResourceV1alpha2().ResourceSlices().Create(context.TODO(), &v1alpha2.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice2"}, NodeName: "node2", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
+if _, err := superuserClient.ResourceV1alpha3().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice2"}, NodeName: "node2", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
 t.Fatal(err)
 }

@@ -193,13 +193,13 @@ func TestNodeAuthorizer(t *testing.T) {
 }
 getResourceClaim := func(client clientset.Interface) func() error {
 return func() error {
-_, err := client.ResourceV1alpha2().ResourceClaims("ns").Get(context.TODO(), "mynamedresourceclaim", metav1.GetOptions{})
+_, err := client.ResourceV1alpha3().ResourceClaims("ns").Get(context.TODO(), "mynamedresourceclaim", metav1.GetOptions{})
 return err
 }
 }
 getResourceClaimTemplate := func(client clientset.Interface) func() error {
 return func() error {
-_, err := client.ResourceV1alpha2().ResourceClaims("ns").Get(context.TODO(), "mytemplatizedresourceclaim", metav1.GetOptions{})
+_, err := client.ResourceV1alpha3().ResourceClaims("ns").Get(context.TODO(), "mytemplatizedresourceclaim", metav1.GetOptions{})
 return err
 }
 }
@@ -209,7 +209,7 @@ func TestNodeAuthorizer(t *testing.T) {
 if nodeName != nil {
 listOptions.FieldSelector = "nodeName=" + *nodeName
 }
-return client.ResourceV1alpha2().ResourceSlices().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
+return client.ResourceV1alpha3().ResourceSlices().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
 }
 }
 addResourceClaimTemplateReference := func(client clientset.Interface) func() error {
@@ -663,7 +663,7 @@ func TestNodeAuthorizer(t *testing.T) {
 expectAllowed(t, deleteResourceSliceCollection(csiNode1Client, ptr.To("node1")))

 // One slice must have been deleted, the other not.
-slices, err := superuserClient.ResourceV1alpha2().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
+slices, err := superuserClient.ResourceV1alpha3().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
 if err != nil {
 t.Fatal(err)
 }
@@ -676,7 +676,7 @@ func TestNodeAuthorizer(t *testing.T) {

 // Superuser can delete.
 expectAllowed(t, deleteResourceSliceCollection(superuserClient, nil))
-slices, err = superuserClient.ResourceV1alpha2().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
+slices, err = superuserClient.ResourceV1alpha3().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
 if err != nil {
 t.Fatal(err)
 }

@@ -403,32 +403,32 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
 },
 // --

-// k8s.io/kubernetes/pkg/apis/resource/v1alpha2
-gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): {
+// k8s.io/kubernetes/pkg/apis/resource/v1alpha3
+gvr("resource.k8s.io", "v1alpha3", "resourceclasses"): {
 Stub: `{"metadata": {"name": "class1name"}, "driverName": "example.com"}`,
 ExpectedEtcdPath: "/registry/resourceclasses/class1name",
 },
-gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): {
+gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): {
 Stub: `{"metadata": {"name": "claim1name"}, "spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}`,
 ExpectedEtcdPath: "/registry/resourceclaims/" + namespace + "/claim1name",
 },
-gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): {
+gvr("resource.k8s.io", "v1alpha3", "resourceclaimtemplates"): {
 Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`,
 ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name",
 },
-gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): {
+gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): {
 Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`,
 ExpectedEtcdPath: "/registry/podschedulingcontexts/" + namespace + "/pod1name",
 },
-gvr("resource.k8s.io", "v1alpha2", "resourceclassparameters"): {
+gvr("resource.k8s.io", "v1alpha3", "resourceclassparameters"): {
 Stub: `{"metadata": {"name": "class1parameters"}}`,
 ExpectedEtcdPath: "/registry/resourceclassparameters/" + namespace + "/class1parameters",
 },
-gvr("resource.k8s.io", "v1alpha2", "resourceclaimparameters"): {
+gvr("resource.k8s.io", "v1alpha3", "resourceclaimparameters"): {
 Stub: `{"metadata": {"name": "claim1parameters"}}`,
 ExpectedEtcdPath: "/registry/resourceclaimparameters/" + namespace + "/claim1parameters",
 },
-gvr("resource.k8s.io", "v1alpha2", "resourceslices"): {
+gvr("resource.k8s.io", "v1alpha3", "resourceslices"): {
 Stub: `{"metadata": {"name": "node1slice"}, "nodeName": "worker1", "driverName": "dra.example.com", "namedResources": {}}`,
 ExpectedEtcdPath: "/registry/resourceslices/node1slice",
 },

@@ -29,7 +29,7 @@ import (

 "github.com/google/go-cmp/cmp"
 v1 "k8s.io/api/core/v1"
-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -679,30 +679,30 @@ func TestPodSchedulingContextSSA(t *testing.T) {
 }

 defer func() {
-if err := testCtx.ClientSet.ResourceV1alpha2().ResourceClasses().DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil {
+if err := testCtx.ClientSet.ResourceV1alpha3().ResourceClasses().DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil {
 t.Errorf("Unexpected error deleting ResourceClasses: %v", err)
 }
 }()
-class := &resourcev1alpha2.ResourceClass{
+class := &resourceapi.ResourceClass{
 ObjectMeta: metav1.ObjectMeta{
 Name: "my-class",
 },
 DriverName: "does-not-matter",
 }
-if _, err := testCtx.ClientSet.ResourceV1alpha2().ResourceClasses().Create(testCtx.Ctx, class, metav1.CreateOptions{}); err != nil {
+if _, err := testCtx.ClientSet.ResourceV1alpha3().ResourceClasses().Create(testCtx.Ctx, class, metav1.CreateOptions{}); err != nil {
 t.Fatalf("Failed to create class: %v", err)
 }

-claim := &resourcev1alpha2.ResourceClaim{
+claim := &resourceapi.ResourceClaim{
 ObjectMeta: metav1.ObjectMeta{
 Name: "my-claim",
 Namespace: testCtx.NS.Name,
 },
-Spec: resourcev1alpha2.ResourceClaimSpec{
+Spec: resourceapi.ResourceClaimSpec{
 ResourceClassName: class.Name,
 },
 }
-if _, err := testCtx.ClientSet.ResourceV1alpha2().ResourceClaims(claim.Namespace).Create(testCtx.Ctx, claim, metav1.CreateOptions{}); err != nil {
+if _, err := testCtx.ClientSet.ResourceV1alpha3().ResourceClaims(claim.Namespace).Create(testCtx.Ctx, claim, metav1.CreateOptions{}); err != nil {
 t.Fatalf("Failed to create claim: %v", err)
 }

@@ -719,11 +719,11 @@ func TestPodSchedulingContextSSA(t *testing.T) {
 }

 // Check that the PodSchedulingContext exists and has a selected node.
-var schedulingCtx *resourcev1alpha2.PodSchedulingContext
+var schedulingCtx *resourceapi.PodSchedulingContext
 if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Microsecond, 30*time.Second, true,
 func(context.Context) (bool, error) {
 var err error
-schedulingCtx, err = testCtx.ClientSet.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Get(testCtx.Ctx, pod.Name, metav1.GetOptions{})
+schedulingCtx, err = testCtx.ClientSet.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Get(testCtx.Ctx, pod.Name, metav1.GetOptions{})
 if apierrors.IsNotFound(err) {
 return false, nil
 }
@@ -756,12 +756,12 @@ func TestPodSchedulingContextSSA(t *testing.T) {

 // Now force the scheduler to update the PodSchedulingContext by setting UnsuitableNodes so that
 // the selected node is not suitable.
-schedulingCtx.Status.ResourceClaims = []resourcev1alpha2.ResourceClaimSchedulingStatus{{
+schedulingCtx.Status.ResourceClaims = []resourceapi.ResourceClaimSchedulingStatus{{
 Name: podClaimName,
 UnsuitableNodes: []string{schedulingCtx.Spec.SelectedNode},
 }}

-if _, err := testCtx.ClientSet.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).UpdateStatus(testCtx.Ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
+if _, err := testCtx.ClientSet.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).UpdateStatus(testCtx.Ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
 t.Fatalf("Unexpected PodSchedulingContext status update error: %v", err)
 }

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaim
 metadata:
   name: test-claim-{{.Index}}

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaim
 metadata:
   name: test-claim-{{.Index}}

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaimParameters
 metadata:
   name: test-claim-parameters

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaimTemplate
 metadata:
   name: test-claim-template

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClaimTemplate
 metadata:
   name: test-claim-template

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClass
 metadata:
   name: test-class

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
 kind: ResourceClass
 metadata:
   name: test-class

@@ -22,7 +22,7 @@ import (
 "path/filepath"
 "sync"

-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/client-go/util/workqueue"
 "k8s.io/klog/v2"
@@ -84,7 +84,7 @@ func (op *createResourceClaimsOp) requiredNamespaces() []string {
 func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
 tCtx.Logf("creating %d claims in namespace %q", op.Count, op.Namespace)

-var claimTemplate *resourcev1alpha2.ResourceClaim
+var claimTemplate *resourceapi.ResourceClaim
 if err := getSpecFromFile(&op.TemplatePath, &claimTemplate); err != nil {
 tCtx.Fatalf("parsing ResourceClaim %q: %v", op.TemplatePath, err)
 }
@@ -92,7 +92,7 @@ func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
 var mutex sync.Mutex
 create := func(i int) {
 err := func() error {
-if _, err := tCtx.Client().ResourceV1alpha2().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
+if _, err := tCtx.Client().ResourceV1alpha3().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
 return fmt.Errorf("create claim: %v", err)
 }
 return nil
@@ -197,11 +197,11 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
 if op.StructuredParameters {
 for _, nodeName := range resources.Nodes {
 slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
-_, err := tCtx.Client().ResourceV1alpha2().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
+_, err := tCtx.Client().ResourceV1alpha3().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
 tCtx.ExpectNoError(err, "create node resource slice")
 }
 tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
-err := tCtx.Client().ResourceV1alpha2().ResourceSlices().DeleteCollection(tCtx,
+err := tCtx.Client().ResourceV1alpha3().ResourceSlices().DeleteCollection(tCtx,
 metav1.DeleteOptions{},
 metav1.ListOptions{FieldSelector: "driverName=" + op.DriverName},
 )
@@ -229,8 +229,8 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
 })
 }

-func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.ResourceSlice {
-slice := &resourcev1alpha2.ResourceSlice{
+func resourceSlice(driverName, nodeName string, capacity int) *resourceapi.ResourceSlice {
+slice := &resourceapi.ResourceSlice{
 ObjectMeta: metav1.ObjectMeta{
 Name: nodeName,
 },
@@ -238,14 +238,14 @@ func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.
 NodeName: nodeName,
 DriverName: driverName,

-ResourceModel: resourcev1alpha2.ResourceModel{
-NamedResources: &resourcev1alpha2.NamedResourcesResources{},
+ResourceModel: resourceapi.ResourceModel{
+NamedResources: &resourceapi.NamedResourcesResources{},
 },
 }

 for i := 0; i < capacity; i++ {
 slice.ResourceModel.NamedResources.Instances = append(slice.ResourceModel.NamedResources.Instances,
-resourcev1alpha2.NamedResourcesInstance{
+resourceapi.NamedResourcesInstance{
 Name: fmt.Sprintf("instance-%d", i),
 },
 )

@@ -87,7 +87,7 @@ func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfig
 // except for DRA API group when needed.
 runtimeConfig := []string{"api/alpha=false"}
 if enabledFeatures[features.DynamicResourceAllocation] {
-runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha2=true")
+runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha3=true")
 }
 customFlags := []string{
 // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.

@@ -28,7 +28,7 @@ import (

 v1 "k8s.io/api/core/v1"
 policy "k8s.io/api/policy/v1"
-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -130,9 +130,9 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf

 func CreateResourceClaimController(ctx context.Context, tb ktesting.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() {
 podInformer := informerFactory.Core().V1().Pods()
-schedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
-claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
-claimTemplateInformer := informerFactory.Resource().V1alpha2().ResourceClaimTemplates()
+schedulingInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts()
+claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
+claimTemplateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
 claimController, err := resourceclaim.NewController(klog.FromContext(ctx), clientSet, podInformer, schedulingInformer, claimInformer, claimTemplateInformer)
 if err != nil {
 tb.Fatalf("Error creating claim controller: %v", err)
@@ -512,7 +512,7 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf
 options.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority", "StorageObjectInUseProtection"}
 if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
 options.APIEnablement.RuntimeConfig = cliflag.ConfigurationMap{
-resourcev1alpha2.SchemeGroupVersion.String(): "true",
+resourceapi.SchemeGroupVersion.String(): "true",
 }
 }
 },