api: resource.k8s.io v1alpha1 -> v1alpha2

For Kubernetes 1.27, we intend to make some breaking API changes:
- rename PodScheduling -> PodSchedulingHints (https://github.com/kubernetes/kubernetes/issues/114283)
- extend ResourceClaimStatus (https://github.com/kubernetes/enhancements/pull/3802)

We need to switch from v1alpha1 to v1alpha2 for that.
Author: Patrick Ohly
Date:   2023-03-06 12:43:58 +01:00
Parent: 152876a3eb
Commit: 29941b8d3e

140 changed files with 2497 additions and 2497 deletions
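For code that consumes the API, the migration is mechanical: the import path and the typed clientset accessor change, while type and constant names stay the same. A minimal sketch of the consumer-side update (the `listDelayedClaims` helper is illustrative, not part of this commit):

```go
package example

import (
	"context"
	"fmt"

	// was: resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listDelayedClaims lists the ResourceClaims in a namespace and reports
// which ones use delayed ("wait for first consumer") allocation.
func listDelayedClaims(ctx context.Context, clientset kubernetes.Interface, namespace string) error {
	// was: clientset.ResourceV1alpha1().ResourceClaims(...)
	claims, err := clientset.ResourceV1alpha2().ResourceClaims(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, claim := range claims.Items {
		// The AllocationMode type and its constants keep their names in v1alpha2.
		if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
			fmt.Printf("claim %s uses delayed allocation\n", claim.Name)
		}
	}
	return nil
}
```

Manifests need the same one-line bump: `apiVersion: resource.k8s.io/v1alpha1` becomes `apiVersion: resource.k8s.io/v1alpha2` for ResourceClass, ResourceClaim, and ResourceClaimTemplate objects, as in the YAML files below.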


@@ -28,7 +28,7 @@ import (
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -74,7 +74,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
ginkgo.By("waiting for container startup to fail")
parameters := b.parameters()
-pod, template := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
b.create(ctx, parameters, pod, template)
@@ -97,7 +97,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
})
ginkgo.It("must not run a pod if a claim is not reserved for it", func(ctx context.Context) {
parameters := b.parameters()
-claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate)
+claim := b.externalClaim(resourcev1alpha2.AllocationModeImmediate)
pod := b.podExternal()
// This bypasses scheduling and therefore the pod gets
@@ -120,7 +120,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
})
ginkgo.It("must unprepare resources for force-deleted pod", func(ctx context.Context) {
parameters := b.parameters()
-claim := b.externalClaim(resourcev1alpha1.AllocationModeImmediate)
+claim := b.externalClaim(resourcev1alpha2.AllocationModeImmediate)
pod := b.podExternal()
zero := int64(0)
pod.Spec.TerminationGracePeriodSeconds = &zero
@@ -153,7 +153,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
ginkgo.It("supports claim and class parameters", func(ctx context.Context) {
classParameters := b.parameters("x", "y")
claimParameters := b.parameters()
-pod, template := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
b.create(ctx, classParameters, claimParameters, pod, template)
@@ -168,7 +168,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
// claimTests tries out several different combinations of pods with
// claims, both inline and external.
-claimTests := func(allocationMode resourcev1alpha1.AllocationMode) {
+claimTests := func(allocationMode resourcev1alpha2.AllocationMode) {
ginkgo.It("supports simple pod referencing inline resource claim", func(ctx context.Context) {
parameters := b.parameters()
pod, template := b.podInline(allocationMode)
@@ -233,11 +233,11 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
}
ginkgo.Context("with delayed allocation", func() {
-claimTests(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+claimTests(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
})
ginkgo.Context("with immediate allocation", func() {
-claimTests(resourcev1alpha1.AllocationModeImmediate)
+claimTests(resourcev1alpha2.AllocationModeImmediate)
})
})
@@ -273,7 +273,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
}
pod1 := createPod()
pod2 := createPod()
-claim := b.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+claim := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
b.create(ctx, parameters, claim, pod1, pod2)
for _, pod := range []*v1.Pod{pod1, pod2} {
@@ -293,7 +293,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
})
b := newBuilder(f, driver)
-tests := func(allocationMode resourcev1alpha1.AllocationMode) {
+tests := func(allocationMode resourcev1alpha2.AllocationMode) {
ginkgo.It("uses all resources", func(ctx context.Context) {
var objs = []klog.KMetadata{
b.parameters(),
@@ -335,11 +335,11 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
}
ginkgo.Context("with delayed allocation", func() {
-tests(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+tests(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
})
ginkgo.Context("with immediate allocation", func() {
-tests(resourcev1alpha1.AllocationModeImmediate)
+tests(resourcev1alpha2.AllocationModeImmediate)
})
})
@@ -351,8 +351,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
MaxAllocations: 2,
Nodes: nodes.NodeNames,
-AllocateWrapper: func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string,
-handler func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error)) (result *resourcev1alpha1.AllocationResult, err error) {
+AllocateWrapper: func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string,
+handler func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error)) (result *resourcev1alpha2.AllocationResult, err error) {
return allocateWrapper(ctx, claim, claimParameters, class, classParameters, selectedNode, handler)
},
}
@@ -373,8 +373,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
defer cancel()
parameters := b.parameters()
-claim1 := b.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
-claim2 := b.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+claim1 := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claim2 := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
pod1 := b.podExternal()
pod1.Spec.ResourceClaims = append(pod1.Spec.ResourceClaims,
v1.PodResourceClaim{
@@ -389,11 +389,11 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
blockClaim, cancelBlockClaim := context.WithCancel(ctx)
defer cancelBlockClaim()
var allocated int32
-allocateWrapper = func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{},
-class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string,
-handler func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{},
-class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error),
-) (result *resourcev1alpha1.AllocationResult, err error) {
+allocateWrapper = func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{},
+class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string,
+handler func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{},
+class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error),
+) (result *resourcev1alpha2.AllocationResult, err error) {
oldAllocated := atomic.AddInt32(&allocated, 0)
if oldAllocated == 1 && strings.HasPrefix(claim.Name, "external-claim") {
<-blockClaim.Done()
@@ -409,7 +409,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
ginkgo.By("waiting for one claim to be allocated")
var nodeSelector *v1.NodeSelector
gomega.Eventually(ctx, func(ctx context.Context) (int, error) {
-claims, err := f.ClientSet.ResourceV1alpha1().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{})
+claims, err := f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return 0, err
}
@@ -429,7 +429,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
// the node selector looks like and can
// directly access the key and value from it.
ginkgo.By(fmt.Sprintf("create second pod on the same node %s", nodeSelector))
-pod2, template2 := b.podInline(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+pod2, template2 := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
req := nodeSelector.NodeSelectorTerms[0].MatchExpressions[0]
node := req.Values[0]
pod2.Spec.NodeSelector = map[string]string{req.Key: node}
@@ -476,8 +476,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
ginkgo.It("work", func(ctx context.Context) {
parameters1 := b1.parameters()
parameters2 := b2.parameters()
-claim1 := b1.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
-claim2 := b2.externalClaim(resourcev1alpha1.AllocationModeWaitForFirstConsumer)
+claim1 := b1.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claim2 := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
pod := b1.podExternal()
pod.Spec.ResourceClaims = append(pod.Spec.ResourceClaims,
v1.PodResourceClaim{
@@ -513,8 +513,8 @@ func (b *builder) className() string {
// class returns the resource class that the builder's other objects
// reference.
-func (b *builder) class() *resourcev1alpha1.ResourceClass {
-class := &resourcev1alpha1.ResourceClass{
+func (b *builder) class() *resourcev1alpha2.ResourceClass {
+class := &resourcev1alpha2.ResourceClass{
ObjectMeta: metav1.ObjectMeta{
Name: b.className(),
},
@@ -522,7 +522,7 @@ func (b *builder) class() *resourcev1alpha1.ResourceClass {
SuitableNodes: b.nodeSelector(),
}
if b.classParametersName != "" {
class.ParametersRef = &resourcev1alpha1.ResourceClassParametersReference{
class.ParametersRef = &resourcev1alpha2.ResourceClassParametersReference{
Kind: "ConfigMap",
Name: b.classParametersName,
Namespace: b.f.Namespace.Name,
@@ -551,19 +551,19 @@ func (b *builder) nodeSelector() *v1.NodeSelector {
// externalClaim returns external resource claim
// that test pods can reference
-func (b *builder) externalClaim(allocationMode resourcev1alpha1.AllocationMode) *resourcev1alpha1.ResourceClaim {
+func (b *builder) externalClaim(allocationMode resourcev1alpha2.AllocationMode) *resourcev1alpha2.ResourceClaim {
b.claimCounter++
name := "external-claim" + b.driver.NameSuffix // This is what podExternal expects.
if b.claimCounter > 1 {
name += fmt.Sprintf("-%d", b.claimCounter)
}
return &resourcev1alpha1.ResourceClaim{
return &resourcev1alpha2.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
-Spec: resourcev1alpha1.ResourceClaimSpec{
+Spec: resourcev1alpha2.ResourceClaimSpec{
ResourceClassName: b.className(),
ParametersRef: &resourcev1alpha1.ResourceClaimParametersReference{
ParametersRef: &resourcev1alpha2.ResourceClaimParametersReference{
Kind: "ConfigMap",
Name: b.parametersName(),
},
@@ -632,7 +632,7 @@ func (b *builder) pod() *v1.Pod {
}
// podInline adds an inline resource claim with default class name and parameters.
-func (b *builder) podInline(allocationMode resourcev1alpha1.AllocationMode) (*v1.Pod, *resourcev1alpha1.ResourceClaimTemplate) {
+func (b *builder) podInline(allocationMode resourcev1alpha2.AllocationMode) (*v1.Pod, *resourcev1alpha2.ResourceClaimTemplate) {
pod := b.pod()
pod.Spec.Containers[0].Name = "with-resource"
podClaimName := "my-inline-claim"
@@ -645,15 +645,15 @@ func (b *builder) podInline(allocationMode resourcev1alpha1.AllocationMode) (*v1
},
},
}
template := &resourcev1alpha1.ResourceClaimTemplate{
template := &resourcev1alpha2.ResourceClaimTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
},
-Spec: resourcev1alpha1.ResourceClaimTemplateSpec{
-Spec: resourcev1alpha1.ResourceClaimSpec{
+Spec: resourcev1alpha2.ResourceClaimTemplateSpec{
+Spec: resourcev1alpha2.ResourceClaimSpec{
ResourceClassName: b.className(),
ParametersRef: &resourcev1alpha1.ResourceClaimParametersReference{
ParametersRef: &resourcev1alpha2.ResourceClaimParametersReference{
Kind: "ConfigMap",
Name: b.parametersName(),
},
@@ -665,7 +665,7 @@ func (b *builder) podInline(allocationMode resourcev1alpha1.AllocationMode) (*v1
}
// podInlineMultiple returns a pod with inline resource claim referenced by 3 containers
-func (b *builder) podInlineMultiple(allocationMode resourcev1alpha1.AllocationMode) (*v1.Pod, *resourcev1alpha1.ResourceClaimTemplate) {
+func (b *builder) podInlineMultiple(allocationMode resourcev1alpha2.AllocationMode) (*v1.Pod, *resourcev1alpha2.ResourceClaimTemplate) {
pod, template := b.podInline(allocationMode)
pod.Spec.Containers = append(pod.Spec.Containers, *pod.Spec.Containers[0].DeepCopy(), *pod.Spec.Containers[0].DeepCopy())
pod.Spec.Containers[1].Name = pod.Spec.Containers[1].Name + "-1"
@@ -706,16 +706,16 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) {
ginkgo.By(fmt.Sprintf("creating %T %s", obj, obj.GetName()), func() {
var err error
switch obj := obj.(type) {
-case *resourcev1alpha1.ResourceClass:
-_, err = b.f.ClientSet.ResourceV1alpha1().ResourceClasses().Create(ctx, obj, metav1.CreateOptions{})
+case *resourcev1alpha2.ResourceClass:
+_, err = b.f.ClientSet.ResourceV1alpha2().ResourceClasses().Create(ctx, obj, metav1.CreateOptions{})
case *v1.Pod:
_, err = b.f.ClientSet.CoreV1().Pods(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
case *v1.ConfigMap:
_, err = b.f.ClientSet.CoreV1().ConfigMaps(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
-case *resourcev1alpha1.ResourceClaim:
-_, err = b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
-case *resourcev1alpha1.ResourceClaimTemplate:
-_, err = b.f.ClientSet.ResourceV1alpha1().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
+case *resourcev1alpha2.ResourceClaim:
+_, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
+case *resourcev1alpha2.ResourceClaimTemplate:
+_, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
default:
framework.Fail(fmt.Sprintf("internal error, unsupported type %T", obj), 1)
}
@@ -763,7 +763,7 @@ func (b *builder) setUp() {
}
func (b *builder) tearDown(ctx context.Context) {
-err := b.f.ClientSet.ResourceV1alpha1().ResourceClasses().Delete(ctx, b.className(), metav1.DeleteOptions{})
+err := b.f.ClientSet.ResourceV1alpha2().ResourceClasses().Delete(ctx, b.className(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "delete resource class")
// Before we allow the namespace and all objects in it to be deleted by
@@ -787,14 +787,14 @@ func (b *builder) tearDown(ctx context.Context) {
return b.listTestPods(ctx)
}).WithTimeout(time.Minute).Should(gomega.BeEmpty(), "remaining pods despite deletion")
-claims, err := b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
+claims, err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "get resource claims")
for _, claim := range claims.Items {
if claim.DeletionTimestamp != nil {
continue
}
ginkgo.By(fmt.Sprintf("deleting %T %s", &claim, klog.KObj(&claim)))
-err := b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
+err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "delete claim")
}
@@ -806,8 +806,8 @@ func (b *builder) tearDown(ctx context.Context) {
}
ginkgo.By("waiting for claims to be deallocated and deleted")
-gomega.Eventually(func() ([]resourcev1alpha1.ResourceClaim, error) {
-claims, err := b.f.ClientSet.ResourceV1alpha1().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
+gomega.Eventually(func() ([]resourcev1alpha2.ResourceClaim, error) {
+claims, err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}


@@ -13,7 +13,7 @@ nodes:
v: "5"
apiServer:
extraArgs:
runtime-config: "resource.k8s.io/v1alpha1=true"
runtime-config: "resource.k8s.io/v1alpha2=true"
- |
kind: InitConfiguration
nodeRegistration:


@@ -55,7 +55,7 @@ kubelet<->dynamic resource allocation plugin interaction.
To try out the feature, build Kubernetes, then in one console run:
```console
RUNTIME_CONFIG="resource.k8s.io/v1alpha1" FEATURE_GATES=DynamicResourceAllocation=true ALLOW_PRIVILEGED=1 ./hack/local-up-cluster.sh -O
RUNTIME_CONFIG="resource.k8s.io/v1alpha2" FEATURE_GATES=DynamicResourceAllocation=true ALLOW_PRIVILEGED=1 ./hack/local-up-cluster.sh -O
```
In another:


@@ -28,7 +28,7 @@ import (
"sync"
v1 "k8s.io/api/core/v1"
resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
@@ -47,11 +47,11 @@ type Resources struct {
AllocateWrapper AllocateWrapperType
}
-type AllocateWrapperType func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{},
-class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string,
-handler func(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{},
-class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error),
-) (result *resourcev1alpha1.AllocationResult, err error)
+type AllocateWrapperType func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{},
+class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string,
+handler func(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{},
+class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error),
+) (result *resourcev1alpha2.AllocationResult, err error)
type ExampleController struct {
clientset kubernetes.Interface
@@ -122,7 +122,7 @@ func (c *ExampleController) GetNumDeallocations() int64 {
return c.numDeallocations
}
-func (c *ExampleController) GetClassParameters(ctx context.Context, class *resourcev1alpha1.ResourceClass) (interface{}, error) {
+func (c *ExampleController) GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) {
if class.ParametersRef != nil {
if class.ParametersRef.APIGroup != "" ||
class.ParametersRef.Kind != "ConfigMap" {
@@ -133,7 +133,7 @@ func (c *ExampleController) GetClassParameters(ctx context.Context, class *resou
return nil, nil
}
-func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, class *resourcev1alpha1.ResourceClass, classParameters interface{}) (interface{}, error) {
+func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) {
if claim.Spec.ParametersRef != nil {
if claim.Spec.ParametersRef.APIGroup != "" ||
claim.Spec.ParametersRef.Kind != "ConfigMap" {
@@ -152,7 +152,7 @@ func (c *ExampleController) readParametersFromConfigMap(ctx context.Context, nam
return configMap.Data, nil
}
-func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) {
+func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error) {
if c.resources.AllocateWrapper != nil {
return c.resources.AllocateWrapper(ctx, claim, claimParameters, class, classParameters, selectedNode, c.allocate)
}
@@ -160,7 +160,7 @@ func (c *ExampleController) Allocate(ctx context.Context, claim *resourcev1alpha
}
// allocate simply copies parameters as JSON map into ResourceHandle.
-func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim, claimParameters interface{}, class *resourcev1alpha1.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha1.AllocationResult, err error) {
+func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error) {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID)
defer func() {
logger.V(3).Info("done", "result", result, "err", err)
@@ -210,7 +210,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
}
}
allocation := &resourcev1alpha1.AllocationResult{
allocation := &resourcev1alpha2.AllocationResult{
Shareable: c.resources.Shareable,
}
p := parameters{
@@ -252,7 +252,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
return allocation, nil
}
-func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alpha1.ResourceClaim) error {
+func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Deallocate"), "claim", klog.KObj(claim), "uid", claim.UID)
c.mutex.Lock()
defer c.mutex.Unlock()


@@ -2,7 +2,7 @@
# When using it instead of a functional one, scheduling a pod leads to:
# Warning FailedScheduling 16s default-scheduler 0/1 nodes are available: 1 excluded via potential node filter in resource class.
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClass
metadata:
name: example


@@ -8,7 +8,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClaim
metadata:
name: external-claim


@@ -6,7 +6,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClaimTemplate
metadata:
name: pause-template


@@ -8,7 +8,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClaimTemplate
metadata:
name: test-inline-claim-template


@@ -8,7 +8,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClaim
metadata:
name: shared-claim


@@ -6,7 +6,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClaim
metadata:
name: example


@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
kind: ResourceClass
metadata:
name: example