DRA: bump API v1alpha2 -> v1alpha3
This is in preparation for revamping the resource.k8s.io API completely. Because there will be no support for transitioning from v1alpha2 to v1alpha3, the roundtrip test data for that API in 1.29 and 1.30 is removed.

Repeating the version in the import name of the API packages is not really required. It was done for a while to support simpler grepping for usage of alpha APIs, but there are better ways for that now. So during this transition, "resourceapi" gets used instead of "resourcev1alpha3" and the version gets dropped from informer and lister import names. The advantage is that the next bump to v1beta1 will affect fewer source code lines. Only source code where the version really matters (like API registration) retains the versioned import.
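To illustrate the import convention described above, here is a minimal sketch; the two imports are shown side by side only for comparison, and the package paths are the ones that appear in the diff below:

    import (
        // Old convention: the API version is repeated in the import alias, so every
        // version bump touches each import site and every identifier built from it.
        resourcev1alpha2 "k8s.io/api/resource/v1alpha2"

        // New convention: a version-less alias; only the import path mentions
        // v1alpha3, so the next bump to v1beta1 only needs to change this one line.
        resourceapi "k8s.io/api/resource/v1alpha3"
    )

Versioned aliases remain only where the version itself is the point, such as API registration code.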
@@ -25,7 +25,7 @@ import (
"time"

v1 "k8s.io/api/core/v1"
-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -33,12 +33,12 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
v1informers "k8s.io/client-go/informers/core/v1"
-resourcev1alpha2informers "k8s.io/client-go/informers/resource/v1alpha2"
+resourceinformers "k8s.io/client-go/informers/resource/v1alpha3"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
v1listers "k8s.io/client-go/listers/core/v1"
-resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2"
+resourcelisters "k8s.io/client-go/listers/resource/v1alpha3"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
@@ -78,7 +78,7 @@ type Controller struct {
// claimLister is the shared ResourceClaim lister used to fetch and store ResourceClaim
// objects from the API server. It is shared with other controllers and
// therefore the ResourceClaim objects in its store should be treated as immutable.
-claimLister resourcev1alpha2listers.ResourceClaimLister
+claimLister resourcelisters.ResourceClaimLister
claimsSynced cache.InformerSynced
claimCache cache.MutationCache

@@ -92,14 +92,14 @@ type Controller struct {
// fetch scheduling objects from the API server. It is shared with other
// controllers and therefore the objects in its store should be treated
// as immutable.
-podSchedulingLister resourcev1alpha2listers.PodSchedulingContextLister
+podSchedulingLister resourcelisters.PodSchedulingContextLister
podSchedulingSynced cache.InformerSynced

// templateLister is the shared ResourceClaimTemplate lister used to
// fetch template objects from the API server. It is shared with other
// controllers and therefore the objects in its store should be treated
// as immutable.
-templateLister resourcev1alpha2listers.ResourceClaimTemplateLister
+templateLister resourcelisters.ResourceClaimTemplateLister
templatesSynced cache.InformerSynced

// podIndexer has the common PodResourceClaim indexer installed To
@@ -127,9 +127,9 @@ func NewController(
logger klog.Logger,
kubeClient clientset.Interface,
podInformer v1informers.PodInformer,
-podSchedulingInformer resourcev1alpha2informers.PodSchedulingContextInformer,
-claimInformer resourcev1alpha2informers.ResourceClaimInformer,
-templateInformer resourcev1alpha2informers.ResourceClaimTemplateInformer) (*Controller, error) {
+podSchedulingInformer resourceinformers.PodSchedulingContextInformer,
+claimInformer resourceinformers.ResourceClaimInformer,
+templateInformer resourceinformers.ResourceClaimTemplateInformer) (*Controller, error) {

ec := &Controller{
kubeClient: kubeClient,
@@ -329,7 +329,7 @@ func (ec *Controller) podNeedsWork(pod *v1.Pod) (bool, string) {
// - a user created a pod with spec.nodeName set, perhaps for testing
// - some scheduler was used which is unaware of DRA
// - DRA was not enabled in kube-scheduler (version skew, configuration)
-if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer &&
+if claim.Spec.AllocationMode == resourceapi.AllocationModeWaitForFirstConsumer &&
claim.Status.Allocation == nil {
scheduling, err := ec.podSchedulingLister.PodSchedulingContexts(pod.Namespace).Get(pod.Name)
if apierrors.IsNotFound(err) {
@@ -359,7 +359,7 @@ func (ec *Controller) enqueueResourceClaim(logger klog.Logger, obj interface{},
if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = d.Obj
}
-claim, ok := obj.(*resourcev1alpha2.ResourceClaim)
+claim, ok := obj.(*resourceapi.ResourceClaim)
if !ok {
return
}
@@ -533,7 +533,7 @@ func (ec *Controller) syncPod(ctx context.Context, namespace, name string) error
return err
}
}
-if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer &&
+if claim.Spec.AllocationMode == resourceapi.AllocationModeWaitForFirstConsumer &&
claim.Status.Allocation == nil {
logger.V(5).Info("create PodSchedulingContext because claim needs to be allocated", "resourceClaim", klog.KObj(claim))
return ec.ensurePodSchedulingContext(ctx, pod)
@@ -633,7 +633,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
"-" +
podClaim.Name[0:len(podClaim.Name)*maxBaseLen/len(generateName)]
}
-claim = &resourcev1alpha2.ResourceClaim{
+claim = &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
OwnerReferences: []metav1.OwnerReference{
@@ -653,7 +653,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
}
metrics.ResourceClaimCreateAttempts.Inc()
claimName := claim.Name
-claim, err = ec.kubeClient.ResourceV1alpha2().ResourceClaims(pod.Namespace).Create(ctx, claim, metav1.CreateOptions{})
+claim, err = ec.kubeClient.ResourceV1alpha3().ResourceClaims(pod.Namespace).Create(ctx, claim, metav1.CreateOptions{})
if err != nil {
metrics.ResourceClaimCreateFailures.Inc()
return fmt.Errorf("create ResourceClaim %s: %v", claimName, err)
@@ -674,7 +674,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
// findPodResourceClaim looks for an existing ResourceClaim with the right
// annotation (ties it to the pod claim) and the right ownership (ties it to
// the pod).
-func (ec *Controller) findPodResourceClaim(pod *v1.Pod, podClaim v1.PodResourceClaim) (*resourcev1alpha2.ResourceClaim, error) {
+func (ec *Controller) findPodResourceClaim(pod *v1.Pod, podClaim v1.PodResourceClaim) (*resourceapi.ResourceClaim, error) {
// Only claims owned by the pod will get returned here.
claims, err := ec.claimCache.ByIndex(claimPodOwnerIndex, string(pod.UID))
if err != nil {
@@ -682,7 +682,7 @@ func (ec *Controller) findPodResourceClaim(pod *v1.Pod, podClaim v1.PodResourceC
}
deterministicName := pod.Name + "-" + podClaim.Name // Kubernetes <= 1.27 behavior.
for _, claimObj := range claims {
-claim, ok := claimObj.(*resourcev1alpha2.ResourceClaim)
+claim, ok := claimObj.(*resourceapi.ResourceClaim)
if !ok {
return nil, fmt.Errorf("unexpected object of type %T returned by claim cache", claimObj)
}
@@ -714,7 +714,7 @@ func (ec *Controller) ensurePodSchedulingContext(ctx context.Context, pod *v1.Po
return fmt.Errorf("retrieve PodSchedulingContext: %v", err)
}
if scheduling == nil {
-scheduling = &resourcev1alpha2.PodSchedulingContext{
+scheduling = &resourceapi.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
@@ -728,14 +728,14 @@ func (ec *Controller) ensurePodSchedulingContext(ctx context.Context, pod *v1.Po
},
},
},
-Spec: resourcev1alpha2.PodSchedulingContextSpec{
+Spec: resourceapi.PodSchedulingContextSpec{
SelectedNode: pod.Spec.NodeName,
// There is no need for negotiation about
// potential and suitable nodes anymore, so
// PotentialNodes can be left empty.
},
}
-if _, err := ec.kubeClient.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Create(ctx, scheduling, metav1.CreateOptions{}); err != nil {
+if _, err := ec.kubeClient.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Create(ctx, scheduling, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("create PodSchedulingContext %s: %w", klog.KObj(scheduling), err)
}
return nil
@@ -744,7 +744,7 @@ func (ec *Controller) ensurePodSchedulingContext(ctx context.Context, pod *v1.Po
if scheduling.Spec.SelectedNode != pod.Spec.NodeName {
scheduling := scheduling.DeepCopy()
scheduling.Spec.SelectedNode = pod.Spec.NodeName
-if _, err := ec.kubeClient.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Update(ctx, scheduling, metav1.UpdateOptions{}); err != nil {
+if _, err := ec.kubeClient.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Update(ctx, scheduling, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("update spec.selectedNode in PodSchedulingContext %s: %w", klog.KObj(scheduling), err)
}
}
@@ -752,15 +752,15 @@ func (ec *Controller) ensurePodSchedulingContext(ctx context.Context, pod *v1.Po
return nil
}

-func (ec *Controller) reserveForPod(ctx context.Context, pod *v1.Pod, claim *resourcev1alpha2.ResourceClaim) error {
+func (ec *Controller) reserveForPod(ctx context.Context, pod *v1.Pod, claim *resourceapi.ResourceClaim) error {
claim = claim.DeepCopy()
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
-resourcev1alpha2.ResourceClaimConsumerReference{
+resourceapi.ResourceClaimConsumerReference{
Resource: "pods",
Name: pod.Name,
UID: pod.UID,
})
-if _, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
+if _, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("reserve claim %s for pod: %w", klog.KObj(claim), err)
}
return nil
@@ -779,7 +779,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
}

// Check if the ReservedFor entries are all still valid.
-valid := make([]resourcev1alpha2.ResourceClaimConsumerReference, 0, len(claim.Status.ReservedFor))
+valid := make([]resourceapi.ResourceClaimConsumerReference, 0, len(claim.Status.ReservedFor))
for _, reservedFor := range claim.Status.ReservedFor {
if reservedFor.APIGroup == "" &&
reservedFor.Resource == "pods" {
@@ -838,7 +838,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
return fmt.Errorf("unsupported ReservedFor entry: %v", reservedFor)
}

-builtinControllerFinalizer := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer)
+builtinControllerFinalizer := slices.Index(claim.Finalizers, resourceapi.Finalizer)
logger.V(5).Info("claim reserved for counts", "currentCount", len(claim.Status.ReservedFor), "claim", klog.KRef(namespace, name), "updatedCount", len(valid), "builtinController", builtinControllerFinalizer >= 0)
if len(valid) < len(claim.Status.ReservedFor) {
// This is not using a patch because we want the update to fail if anything
@@ -864,13 +864,13 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
// for such claims and not checking for them keeps this code simpler.
if len(valid) == 0 {
if builtinControllerFinalizer >= 0 {
-if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer ||
+if claim.Spec.AllocationMode == resourceapi.AllocationModeWaitForFirstConsumer ||
claim.DeletionTimestamp != nil {
// Allocated by scheduler with structured parameters. We can "deallocate"
// by clearing the allocation.
claim.Status.Allocation = nil
}
-} else if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
+} else if claim.Spec.AllocationMode == resourceapi.AllocationModeWaitForFirstConsumer {
// DRA driver controller in the control plane
// needs to do the deallocation.
claim.Status.DeallocationRequested = true
@@ -879,17 +879,17 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
// with a control plane controller.
}

-claim, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+claim, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}

// Now also remove the finalizer if it is not needed anymore.
// Note that the index may have changed as a result of the UpdateStatus call.
-builtinControllerFinalizer := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer)
+builtinControllerFinalizer := slices.Index(claim.Finalizers, resourceapi.Finalizer)
if builtinControllerFinalizer >= 0 && claim.Status.Allocation == nil {
claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
-if _, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}); err != nil {
+if _, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}); err != nil {
return err
}
}
@@ -901,14 +901,14 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
// deleted. As above we then need to clear the allocation.
claim.Status.Allocation = nil
var err error
-claim, err = ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+claim, err = ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
}
// Whether it was allocated or not, remove the finalizer to unblock removal.
claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
-_, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
+_, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
@@ -929,7 +929,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
// We are certain that the owning pod is not going to need
// the claim and therefore remove the claim.
logger.V(5).Info("deleting unused generated claim", "claim", klog.KObj(claim), "pod", klog.KObj(pod))
-err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
+err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("delete claim %s: %w", klog.KObj(claim), err)
}
@@ -951,7 +951,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
return nil
}

-func owningPod(claim *resourcev1alpha2.ResourceClaim) (string, types.UID) {
+func owningPod(claim *resourceapi.ResourceClaim) (string, types.UID) {
for _, owner := range claim.OwnerReferences {
if ptr.Deref(owner.Controller, false) &&
owner.APIVersion == "v1" &&
@@ -993,7 +993,7 @@ func isPodDone(pod *v1.Pod) bool {
// claimPodOwnerIndexFunc is an index function that returns the pod UIDs of
// all pods which own the resource claim. Should only be one, though.
func claimPodOwnerIndexFunc(obj interface{}) ([]string, error) {
-claim, ok := obj.(*resourcev1alpha2.ResourceClaim)
+claim, ok := obj.(*resourceapi.ResourceClaim)
if !ok {
return nil, nil
}
@@ -27,7 +27,7 @@ import (
"github.com/stretchr/testify/assert"

v1 "k8s.io/api/core/v1"
-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -79,7 +79,7 @@ var (
return pod
}()

-podSchedulingContext = resourcev1alpha2.PodSchedulingContext{
+podSchedulingContext = resourceapi.PodSchedulingContext{
ObjectMeta: metav1.ObjectMeta{
Name: testPodName,
Namespace: testNamespace,
@@ -93,7 +93,7 @@ var (
},
},
},
-Spec: resourcev1alpha2.PodSchedulingContextSpec{
+Spec: resourceapi.PodSchedulingContextSpec{
SelectedNode: nodeName,
},
}
@@ -107,13 +107,13 @@ func TestSyncHandler(t *testing.T) {
tests := []struct {
name string
key string
-claims []*resourcev1alpha2.ResourceClaim
-claimsInCache []*resourcev1alpha2.ResourceClaim
+claims []*resourceapi.ResourceClaim
+claimsInCache []*resourceapi.ResourceClaim
pods []*v1.Pod
podsLater []*v1.Pod
-templates []*resourcev1alpha2.ResourceClaimTemplate
-expectedClaims []resourcev1alpha2.ResourceClaim
-expectedPodSchedulingContexts []resourcev1alpha2.PodSchedulingContext
+templates []*resourceapi.ResourceClaimTemplate
+expectedClaims []resourceapi.ResourceClaim
+expectedPodSchedulingContexts []resourceapi.PodSchedulingContext
expectedStatuses map[string][]v1.PodResourceClaimStatus
expectedError bool
expectedMetrics expectedMetrics
@@ -121,9 +121,9 @@ func TestSyncHandler(t *testing.T) {
{
name: "create",
pods: []*v1.Pod{testPodWithResource},
-templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
+templates: []*resourceapi.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
-expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
+expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithResource.Name: {
{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
@@ -140,10 +140,10 @@ func TestSyncHandler(t *testing.T) {
}
return pod
}()},
-templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
+templates: []*resourceapi.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
-claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
+claims: []*resourceapi.ResourceClaim{generatedTestClaim},
+expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithResource.Name: {
{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
@@ -160,9 +160,9 @@ func TestSyncHandler(t *testing.T) {
}
return pod
}()},
-templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
+templates: []*resourceapi.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
-expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
+expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithResource.Name: {
{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
@@ -181,8 +181,8 @@ func TestSyncHandler(t *testing.T) {
name: "find-existing-claim-by-label",
pods: []*v1.Pod{testPodWithResource},
key: podKey(testPodWithResource),
-claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
+claims: []*resourceapi.ResourceClaim{generatedTestClaim},
+expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithResource.Name: {
{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
@@ -194,8 +194,8 @@ func TestSyncHandler(t *testing.T) {
name: "find-existing-claim-by-name",
pods: []*v1.Pod{testPodWithResource},
key: podKey(testPodWithResource),
-claims: []*resourcev1alpha2.ResourceClaim{testClaim},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaim},
+claims: []*resourceapi.ResourceClaim{testClaim},
+expectedClaims: []resourceapi.ResourceClaim{*testClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithResource.Name: {
{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &testClaim.Name},
@@ -207,7 +207,7 @@ func TestSyncHandler(t *testing.T) {
name: "find-created-claim-in-cache",
pods: []*v1.Pod{testPodWithResource},
key: podKey(testPodWithResource),
-claimsInCache: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
+claimsInCache: []*resourceapi.ResourceClaim{generatedTestClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithResource.Name: {
{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
@@ -237,10 +237,10 @@ func TestSyncHandler(t *testing.T) {
{
name: "create-with-other-claim",
pods: []*v1.Pod{testPodWithResource},
-templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
+templates: []*resourceapi.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
-claims: []*resourcev1alpha2.ResourceClaim{otherNamespaceClaim},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*otherNamespaceClaim, *generatedTestClaim},
+claims: []*resourceapi.ResourceClaim{otherNamespaceClaim},
+expectedClaims: []resourceapi.ResourceClaim{*otherNamespaceClaim, *generatedTestClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithResource.Name: {
{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
@@ -252,14 +252,14 @@ func TestSyncHandler(t *testing.T) {
name: "wrong-claim-owner",
pods: []*v1.Pod{testPodWithResource},
key: podKey(testPodWithResource),
-claims: []*resourcev1alpha2.ResourceClaim{conflictingClaim},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*conflictingClaim},
+claims: []*resourceapi.ResourceClaim{conflictingClaim},
+expectedClaims: []resourceapi.ResourceClaim{*conflictingClaim},
expectedError: true,
},
{
name: "create-conflict",
pods: []*v1.Pod{testPodWithResource},
-templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
+templates: []*resourceapi.ResourceClaimTemplate{template},
key: podKey(testPodWithResource),
expectedMetrics: expectedMetrics{1, 1},
expectedError: true,
@@ -268,27 +268,27 @@ func TestSyncHandler(t *testing.T) {
name: "stay-reserved-seen",
pods: []*v1.Pod{testPodWithResource},
key: claimKey(testClaimReserved),
-claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved},
+claims: []*resourceapi.ResourceClaim{testClaimReserved},
+expectedClaims: []resourceapi.ResourceClaim{*testClaimReserved},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "stay-reserved-not-seen",
podsLater: []*v1.Pod{testPodWithResource},
key: claimKey(testClaimReserved),
-claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved},
+claims: []*resourceapi.ResourceClaim{testClaimReserved},
+expectedClaims: []resourceapi.ResourceClaim{*testClaimReserved},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "clear-reserved-delayed-allocation",
pods: []*v1.Pod{},
key: claimKey(testClaimReserved),
-claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
+claims: []*resourceapi.ResourceClaim{testClaimReserved},
+expectedClaims: func() []resourceapi.ResourceClaim {
claim := testClaimAllocated.DeepCopy()
claim.Status.DeallocationRequested = true
-return []resourcev1alpha2.ResourceClaim{*claim}
+return []resourceapi.ResourceClaim{*claim}
}(),
expectedMetrics: expectedMetrics{0, 0},
},
@@ -296,12 +296,12 @@ func TestSyncHandler(t *testing.T) {
name: "clear-reserved-delayed-allocation-structured",
pods: []*v1.Pod{},
key: claimKey(testClaimReserved),
-claims: []*resourcev1alpha2.ResourceClaim{structuredParameters(testClaimReserved)},
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
+claims: []*resourceapi.ResourceClaim{structuredParameters(testClaimReserved)},
+expectedClaims: func() []resourceapi.ResourceClaim {
claim := testClaimAllocated.DeepCopy()
claim.Finalizers = []string{}
claim.Status.Allocation = nil
-return []resourcev1alpha2.ResourceClaim{*claim}
+return []resourceapi.ResourceClaim{*claim}
}(),
expectedMetrics: expectedMetrics{0, 0},
},
@@ -309,27 +309,27 @@ func TestSyncHandler(t *testing.T) {
name: "dont-clear-reserved-delayed-allocation-structured",
pods: []*v1.Pod{testPodWithResource},
key: claimKey(testClaimReserved),
-claims: func() []*resourcev1alpha2.ResourceClaim {
+claims: func() []*resourceapi.ResourceClaim {
claim := structuredParameters(testClaimReserved)
claim = reserveClaim(claim, otherTestPod)
-return []*resourcev1alpha2.ResourceClaim{claim}
+return []*resourceapi.ResourceClaim{claim}
}(),
-expectedClaims: []resourcev1alpha2.ResourceClaim{*structuredParameters(testClaimReserved)},
+expectedClaims: []resourceapi.ResourceClaim{*structuredParameters(testClaimReserved)},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "clear-reserved-immediate-allocation",
pods: []*v1.Pod{},
key: claimKey(testClaimReserved),
-claims: func() []*resourcev1alpha2.ResourceClaim {
+claims: func() []*resourceapi.ResourceClaim {
claim := testClaimReserved.DeepCopy()
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
-return []*resourcev1alpha2.ResourceClaim{claim}
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
+return []*resourceapi.ResourceClaim{claim}
}(),
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
+expectedClaims: func() []resourceapi.ResourceClaim {
claim := testClaimAllocated.DeepCopy()
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
-return []resourcev1alpha2.ResourceClaim{*claim}
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
+return []resourceapi.ResourceClaim{*claim}
}(),
expectedMetrics: expectedMetrics{0, 0},
},
@@ -337,15 +337,15 @@ func TestSyncHandler(t *testing.T) {
name: "clear-reserved-immediate-allocation-structured",
pods: []*v1.Pod{},
key: claimKey(testClaimReserved),
-claims: func() []*resourcev1alpha2.ResourceClaim {
+claims: func() []*resourceapi.ResourceClaim {
claim := structuredParameters(testClaimReserved.DeepCopy())
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
-return []*resourcev1alpha2.ResourceClaim{claim}
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
+return []*resourceapi.ResourceClaim{claim}
}(),
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
+expectedClaims: func() []resourceapi.ResourceClaim {
claim := structuredParameters(testClaimAllocated.DeepCopy())
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
-return []resourcev1alpha2.ResourceClaim{*claim}
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
+return []resourceapi.ResourceClaim{*claim}
}(),
expectedMetrics: expectedMetrics{0, 0},
},
@@ -353,19 +353,19 @@ func TestSyncHandler(t *testing.T) {
name: "clear-reserved-immediate-allocation-structured-deleted",
pods: []*v1.Pod{},
key: claimKey(testClaimReserved),
-claims: func() []*resourcev1alpha2.ResourceClaim {
+claims: func() []*resourceapi.ResourceClaim {
claim := structuredParameters(testClaimReserved.DeepCopy())
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
claim.DeletionTimestamp = &metav1.Time{}
-return []*resourcev1alpha2.ResourceClaim{claim}
+return []*resourceapi.ResourceClaim{claim}
}(),
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
+expectedClaims: func() []resourceapi.ResourceClaim {
claim := structuredParameters(testClaimAllocated.DeepCopy())
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
claim.DeletionTimestamp = &metav1.Time{}
claim.Finalizers = []string{}
claim.Status.Allocation = nil
-return []resourcev1alpha2.ResourceClaim{*claim}
+return []resourceapi.ResourceClaim{*claim}
}(),
expectedMetrics: expectedMetrics{0, 0},
},
@@ -373,19 +373,19 @@ func TestSyncHandler(t *testing.T) {
name: "immediate-allocation-structured-deleted",
pods: []*v1.Pod{},
key: claimKey(testClaimReserved),
-claims: func() []*resourcev1alpha2.ResourceClaim {
+claims: func() []*resourceapi.ResourceClaim {
claim := structuredParameters(testClaimAllocated.DeepCopy())
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
claim.DeletionTimestamp = &metav1.Time{}
-return []*resourcev1alpha2.ResourceClaim{claim}
+return []*resourceapi.ResourceClaim{claim}
}(),
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
+expectedClaims: func() []resourceapi.ResourceClaim {
claim := structuredParameters(testClaimAllocated.DeepCopy())
-claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
+claim.Spec.AllocationMode = resourceapi.AllocationModeImmediate
claim.DeletionTimestamp = &metav1.Time{}
claim.Finalizers = []string{}
claim.Status.Allocation = nil
-return []resourcev1alpha2.ResourceClaim{*claim}
+return []resourceapi.ResourceClaim{*claim}
}(),
expectedMetrics: expectedMetrics{0, 0},
},
@@ -397,13 +397,13 @@ func TestSyncHandler(t *testing.T) {
return pods
}(),
key: claimKey(testClaimReserved),
-claims: func() []*resourcev1alpha2.ResourceClaim {
-claims := []*resourcev1alpha2.ResourceClaim{testClaimReserved.DeepCopy()}
+claims: func() []*resourceapi.ResourceClaim {
+claims := []*resourceapi.ResourceClaim{testClaimReserved.DeepCopy()}
claims[0].OwnerReferences = nil
return claims
}(),
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
-claims := []resourcev1alpha2.ResourceClaim{*testClaimAllocated.DeepCopy()}
+expectedClaims: func() []resourceapi.ResourceClaim {
+claims := []resourceapi.ResourceClaim{*testClaimAllocated.DeepCopy()}
claims[0].OwnerReferences = nil
claims[0].Status.DeallocationRequested = true
return claims
@@ -418,16 +418,16 @@ func TestSyncHandler(t *testing.T) {
return pods
}(),
key: claimKey(testClaimReserved),
-claims: func() []*resourcev1alpha2.ResourceClaim {
-claims := []*resourcev1alpha2.ResourceClaim{testClaimReserved.DeepCopy()}
+claims: func() []*resourceapi.ResourceClaim {
+claims := []*resourceapi.ResourceClaim{testClaimReserved.DeepCopy()}
claims[0].OwnerReferences = nil
-claims[0].Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
+claims[0].Spec.AllocationMode = resourceapi.AllocationModeImmediate
return claims
}(),
-expectedClaims: func() []resourcev1alpha2.ResourceClaim {
-claims := []resourcev1alpha2.ResourceClaim{*testClaimAllocated.DeepCopy()}
+expectedClaims: func() []resourceapi.ResourceClaim {
+claims := []resourceapi.ResourceClaim{*testClaimAllocated.DeepCopy()}
claims[0].OwnerReferences = nil
-claims[0].Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
+claims[0].Spec.AllocationMode = resourceapi.AllocationModeImmediate
return claims
}(),
expectedMetrics: expectedMetrics{0, 0},
@@ -436,8 +436,8 @@ func TestSyncHandler(t *testing.T) {
name: "remove-reserved",
pods: []*v1.Pod{testPod},
key: claimKey(testClaimReservedTwice),
-claims: []*resourcev1alpha2.ResourceClaim{testClaimReservedTwice},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved},
+claims: []*resourceapi.ResourceClaim{testClaimReservedTwice},
+expectedClaims: []resourceapi.ResourceClaim{*testClaimReserved},
expectedMetrics: expectedMetrics{0, 0},
},
{
@@ -448,7 +448,7 @@ func TestSyncHandler(t *testing.T) {
return pods
}(),
key: claimKey(testClaimReserved),
-claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
+claims: []*resourceapi.ResourceClaim{testClaimReserved},
expectedClaims: nil,
expectedMetrics: expectedMetrics{0, 0},
},
@@ -456,24 +456,24 @@ func TestSyncHandler(t *testing.T) {
name: "trigger-allocation",
pods: []*v1.Pod{testPodWithNodeName},
key: podKey(testPodWithNodeName),
-templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
-claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
+templates: []*resourceapi.ResourceClaimTemplate{template},
+claims: []*resourceapi.ResourceClaim{generatedTestClaim},
+expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithNodeName.Name: {
{Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
},
},
-expectedPodSchedulingContexts: []resourcev1alpha2.PodSchedulingContext{podSchedulingContext},
+expectedPodSchedulingContexts: []resourceapi.PodSchedulingContext{podSchedulingContext},
expectedMetrics: expectedMetrics{0, 0},
},
{
name: "add-reserved",
pods: []*v1.Pod{testPodWithNodeName},
key: podKey(testPodWithNodeName),
-templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
-claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaimAllocated},
-expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaimReserved},
+templates: []*resourceapi.ResourceClaimTemplate{template},
+claims: []*resourceapi.ResourceClaim{generatedTestClaimAllocated},
+expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaimReserved},
expectedStatuses: map[string][]v1.PodResourceClaimStatus{
testPodWithNodeName.Name: {
{Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
@@ -509,9 +509,9 @@ func TestSyncHandler(t *testing.T) {
setupMetrics()
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
podInformer := informerFactory.Core().V1().Pods()
-podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
-claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
-templateInformer := informerFactory.Resource().V1alpha2().ResourceClaimTemplates()
+podSchedulingInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts()
+claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
+templateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()

ec, err := NewController(klog.FromContext(ctx), fakeKubeClient, podInformer, podSchedulingInformer, claimInformer, templateInformer)
if err != nil {
@@ -549,7 +549,7 @@ func TestSyncHandler(t *testing.T) {
t.Fatalf("unexpected success")
}

-claims, err := fakeKubeClient.ResourceV1alpha2().ResourceClaims("").List(ctx, metav1.ListOptions{})
+claims, err := fakeKubeClient.ResourceV1alpha3().ResourceClaims("").List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error while listing claims: %v", err)
}
@@ -571,7 +571,7 @@ func TestSyncHandler(t *testing.T) {
}
assert.Equal(t, tc.expectedStatuses, actualStatuses, "pod resource claim statuses")

-scheduling, err := fakeKubeClient.ResourceV1alpha2().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
+scheduling, err := fakeKubeClient.ResourceV1alpha3().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error while listing claims: %v", err)
}
@@ -582,12 +582,12 @@ func TestSyncHandler(t *testing.T) {
}
}

-func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourcev1alpha2.ResourceClaim {
-claim := &resourcev1alpha2.ResourceClaim{
+func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourceapi.ResourceClaim {
+claim := &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
-Spec: resourcev1alpha2.ResourceClaimSpec{
+Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: classname,
-AllocationMode: resourcev1alpha2.AllocationModeWaitForFirstConsumer,
+AllocationMode: resourceapi.AllocationModeWaitForFirstConsumer,
},
}
if owner != nil {
@@ -597,17 +597,17 @@ func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference)
return claim
}

-func makeGeneratedClaim(podClaimName, generateName, namespace, classname string, createCounter int, owner *metav1.OwnerReference) *resourcev1alpha2.ResourceClaim {
-claim := &resourcev1alpha2.ResourceClaim{
+func makeGeneratedClaim(podClaimName, generateName, namespace, classname string, createCounter int, owner *metav1.OwnerReference) *resourceapi.ResourceClaim {
+claim := &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", generateName, createCounter),
GenerateName: generateName,
Namespace: namespace,
Annotations: map[string]string{"resource.kubernetes.io/pod-claim-name": podClaimName},
},
-Spec: resourcev1alpha2.ResourceClaimSpec{
+Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: classname,
-AllocationMode: resourcev1alpha2.AllocationModeWaitForFirstConsumer,
+AllocationMode: resourceapi.AllocationModeWaitForFirstConsumer,
},
}
if owner != nil {
@@ -617,26 +617,26 @@ func makeGeneratedClaim(podClaimName, generateName, namespace, classname string,
return claim
}

-func allocateClaim(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
+func allocateClaim(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
claim = claim.DeepCopy()
-claim.Status.Allocation = &resourcev1alpha2.AllocationResult{
+claim.Status.Allocation = &resourceapi.AllocationResult{
Shareable: true,
}
return claim
}

-func structuredParameters(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
+func structuredParameters(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
claim = claim.DeepCopy()
// As far the controller is concerned, a claim was allocated by us if it has
// this finalizer. For testing we don't need to update the allocation result.
-claim.Finalizers = append(claim.Finalizers, resourcev1alpha2.Finalizer)
+claim.Finalizers = append(claim.Finalizers, resourceapi.Finalizer)
return claim
}

-func reserveClaim(claim *resourcev1alpha2.ResourceClaim, pod *v1.Pod) *resourcev1alpha2.ResourceClaim {
+func reserveClaim(claim *resourceapi.ResourceClaim, pod *v1.Pod) *resourceapi.ResourceClaim {
claim = claim.DeepCopy()
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
-resourcev1alpha2.ResourceClaimConsumerReference{
+resourceapi.ResourceClaimConsumerReference{
Resource: "pods",
Name: pod.Name,
UID: pod.UID,
@@ -663,11 +663,11 @@ func makePod(name, namespace string, uid types.UID, podClaims ...v1.PodResourceC
return pod
}

-func makeTemplate(name, namespace, classname string) *resourcev1alpha2.ResourceClaimTemplate {
-template := &resourcev1alpha2.ResourceClaimTemplate{
+func makeTemplate(name, namespace, classname string) *resourceapi.ResourceClaimTemplate {
+template := &resourceapi.ResourceClaimTemplate{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
-Spec: resourcev1alpha2.ResourceClaimTemplateSpec{
-Spec: resourcev1alpha2.ResourceClaimSpec{
+Spec: resourceapi.ResourceClaimTemplateSpec{
+Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: classname,
},
},
@@ -679,7 +679,7 @@ func podKey(pod *v1.Pod) string {
return podKeyPrefix + pod.Namespace + "/" + pod.Name
}

-func claimKey(claim *resourcev1alpha2.ResourceClaim) string {
+func claimKey(claim *resourceapi.ResourceClaim) string {
return claimKeyPrefix + claim.Namespace + "/" + claim.Name
}

@@ -695,7 +695,7 @@ func makeOwnerReference(pod *v1.Pod, isController bool) *metav1.OwnerReference {
}
}

-func normalizeClaims(claims []resourcev1alpha2.ResourceClaim) []resourcev1alpha2.ResourceClaim {
+func normalizeClaims(claims []resourceapi.ResourceClaim) []resourceapi.ResourceClaim {
sort.Slice(claims, func(i, j int) bool {
if claims[i].Namespace < claims[j].Namespace {
return true
@@ -711,13 +711,13 @@ func normalizeClaims(claims []resourcev1alpha2.ResourceClaim) []resourcev1alpha2
}
if claims[i].Spec.AllocationMode == "" {
// This emulates defaulting.
-claims[i].Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer
+claims[i].Spec.AllocationMode = resourceapi.AllocationModeWaitForFirstConsumer
}
}
return claims
}

-func normalizeScheduling(scheduling []resourcev1alpha2.PodSchedulingContext) []resourcev1alpha2.PodSchedulingContext {
+func normalizeScheduling(scheduling []resourceapi.PodSchedulingContext) []resourceapi.PodSchedulingContext {
sort.Slice(scheduling, func(i, j int) bool {
return scheduling[i].Namespace < scheduling[j].Namespace ||
scheduling[i].Name < scheduling[j].Name
@@ -739,7 +739,7 @@ func createResourceClaimReactor() func(action k8stesting.Action) (handled bool,
return func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
mutex.Lock()
defer mutex.Unlock()
-claim := action.(k8stesting.CreateAction).GetObject().(*resourcev1alpha2.ResourceClaim)
+claim := action.(k8stesting.CreateAction).GetObject().(*resourceapi.ResourceClaim)
if claim.Name == "" && claim.GenerateName != "" {
claim.Name = fmt.Sprintf("%s-%d", claim.GenerateName, nameCounter)
}