api: resource.k8s.io PodScheduling -> PodSchedulingContext
The name "PodScheduling" was unusual because in contrast to most other names, it was impossible to put an article in front of it. Now PodSchedulingContext is used instead.
@@ -50,8 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
         &ResourceClaimList{},
         &ResourceClaimTemplate{},
         &ResourceClaimTemplateList{},
-        &PodScheduling{},
-        &PodSchedulingList{},
+        &PodSchedulingContext{},
+        &PodSchedulingContextList{},
     )
 
     // Add common types
@@ -181,28 +181,28 @@ type ResourceClaimList struct {
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:prerelease-lifecycle-gen:introduced=1.26
 
-// PodScheduling objects hold information that is needed to schedule
+// PodSchedulingContext objects hold information that is needed to schedule
 // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
 // mode.
 //
 // This is an alpha type and requires enabling the DynamicResourceAllocation
 // feature gate.
-type PodScheduling struct {
+type PodSchedulingContext struct {
     metav1.TypeMeta `json:",inline"`
     // Standard object metadata
     // +optional
     metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
     // Spec describes where resources for the Pod are needed.
-    Spec PodSchedulingSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+    Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"`
 
     // Status describes where resources for the Pod can be allocated.
     // +optional
-    Status PodSchedulingStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+    Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
 }
 
-// PodSchedulingSpec describes where resources for the Pod are needed.
-type PodSchedulingSpec struct {
+// PodSchedulingContextSpec describes where resources for the Pod are needed.
+type PodSchedulingContextSpec struct {
     // SelectedNode is the node for which allocation of ResourceClaims that
     // are referenced by the Pod and that use "WaitForFirstConsumer"
     // allocation is to be attempted.
@@ -221,8 +221,8 @@ type PodSchedulingSpec struct {
     PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
 }
 
-// PodSchedulingStatus describes where resources for the Pod can be allocated.
-type PodSchedulingStatus struct {
+// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
+type PodSchedulingContextStatus struct {
     // ResourceClaims describes resource availability for each
     // pod.spec.resourceClaim entry where the corresponding ResourceClaim
     // uses "WaitForFirstConsumer" allocation mode.
@@ -257,22 +257,22 @@ type ResourceClaimSchedulingStatus struct {
 }
 
 // PodSchedulingNodeListMaxSize defines the maximum number of entries in the
-// node lists that are stored in PodScheduling objects. This limit is part
+// node lists that are stored in PodSchedulingContext objects. This limit is part
 // of the API.
 const PodSchedulingNodeListMaxSize = 128
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:prerelease-lifecycle-gen:introduced=1.26
 
-// PodSchedulingList is a collection of Pod scheduling objects.
-type PodSchedulingList struct {
+// PodSchedulingContextList is a collection of Pod scheduling objects.
+type PodSchedulingContextList struct {
     metav1.TypeMeta `json:",inline"`
     // Standard list metadata
     // +optional
     metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
-    // Items is the list of PodScheduling objects.
-    Items []PodScheduling `json:"items" protobuf:"bytes,2,rep,name=items"`
+    // Items is the list of PodSchedulingContext objects.
+    Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
 
 // +genclient
@@ -139,9 +139,9 @@ type controller struct {
     rcLister resourcev1alpha2listers.ResourceClassLister
     rcSynced cache.InformerSynced
    claimCache cache.MutationCache
-    podSchedulingLister resourcev1alpha2listers.PodSchedulingLister
+    schedulingCtxLister resourcev1alpha2listers.PodSchedulingContextLister
     claimSynced cache.InformerSynced
-    podSchedulingSynced cache.InformerSynced
+    schedulingCtxSynced cache.InformerSynced
 }
 
 // TODO: make it configurable
@@ -157,7 +157,7 @@ func New(
     logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller")
     rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses()
     claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
-    podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings()
+    schedulingCtxInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
 
     eventBroadcaster := record.NewBroadcaster()
     go func() {
@@ -177,7 +177,7 @@ func New(
     eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme,
         v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)})
 
-    // The work queue contains either keys for claims or PodScheduling objects.
+    // The work queue contains either keys for claims or PodSchedulingContext objects.
     queue := workqueue.NewNamedRateLimitingQueue(
         workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s-queue", name))
 
@@ -199,8 +199,8 @@ func New(
         rcSynced: rcInformer.Informer().HasSynced,
         claimCache: claimCache,
         claimSynced: claimInformer.Informer().HasSynced,
-        podSchedulingLister: podSchedulingInformer.Lister(),
-        podSchedulingSynced: podSchedulingInformer.Informer().HasSynced,
+        schedulingCtxLister: schedulingCtxInformer.Lister(),
+        schedulingCtxSynced: schedulingCtxInformer.Informer().HasSynced,
         queue: queue,
         eventRecorder: eventRecorder,
     }
@@ -209,11 +209,11 @@ func New(
     if loggerV6.Enabled() {
         resourceClaimLogger := klog.LoggerWithValues(loggerV6, "type", "ResourceClaim")
         _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&resourceClaimLogger, ctrl))
-        podSchedulingLogger := klog.LoggerWithValues(loggerV6, "type", "PodScheduling")
-        _, _ = podSchedulingInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&podSchedulingLogger, ctrl))
+        schedulingCtxLogger := klog.LoggerWithValues(loggerV6, "type", "PodSchedulingContext")
+        _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&schedulingCtxLogger, ctrl))
     } else {
         _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
-        _, _ = podSchedulingInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
+        _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl))
     }
 
     return ctrl
@@ -233,7 +233,7 @@ func resourceEventHandlerFuncs(logger *klog.Logger, ctrl *controller) cache.Reso
 
 const (
     claimKeyPrefix = "claim:"
-    podSchedulingKeyPrefix = "podscheduling:"
+    schedulingCtxKeyPrefix = "schedulingCtx:"
 )
 
 func (ctrl *controller) add(logger *klog.Logger, obj interface{}) {
@@ -279,8 +279,8 @@ func getKey(obj interface{}) (string, error) {
     switch obj.(type) {
     case *resourcev1alpha2.ResourceClaim:
         prefix = claimKeyPrefix
-    case *resourcev1alpha2.PodScheduling:
-        prefix = podSchedulingKeyPrefix
+    case *resourcev1alpha2.PodSchedulingContext:
+        prefix = schedulingCtxKeyPrefix
     default:
         return "", fmt.Errorf("unexpected object: %T", obj)
     }
@@ -297,7 +297,7 @@ func (ctrl *controller) Run(workers int) {
 
     stopCh := ctrl.ctx.Done()
 
-    if !cache.WaitForCacheSync(stopCh, ctrl.rcSynced, ctrl.claimSynced, ctrl.podSchedulingSynced) {
+    if !cache.WaitForCacheSync(stopCh, ctrl.rcSynced, ctrl.claimSynced, ctrl.schedulingCtxSynced) {
         ctrl.logger.Error(nil, "Cannot sync caches")
         return
     }
@@ -370,16 +370,16 @@ func (ctrl *controller) syncKey(ctx context.Context, key string) (obj runtime.Ob
             return nil, err
         }
         obj, finalErr = claim, ctrl.syncClaim(ctx, claim)
-    case podSchedulingKeyPrefix:
-        podScheduling, err := ctrl.podSchedulingLister.PodSchedulings(namespace).Get(name)
+    case schedulingCtxKeyPrefix:
+        schedulingCtx, err := ctrl.schedulingCtxLister.PodSchedulingContexts(namespace).Get(name)
         if err != nil {
             if k8serrors.IsNotFound(err) {
-                klog.FromContext(ctx).V(5).Info("PodScheduling was deleted, no need to process it")
+                klog.FromContext(ctx).V(5).Info("PodSchedulingContext was deleted, no need to process it")
                 return nil, nil
             }
             return nil, err
         }
-        obj, finalErr = podScheduling, ctrl.syncPodScheduling(ctx, podScheduling)
+        obj, finalErr = schedulingCtx, ctrl.syncPodSchedulingContexts(ctx, schedulingCtx)
     }
     return
 }
@@ -525,9 +525,9 @@ func (ctrl *controller) allocateClaim(ctx context.Context,
     logger := klog.FromContext(ctx)
 
     if claim.Status.Allocation != nil {
-        // This can happen when two PodScheduling objects trigger
+        // This can happen when two PodSchedulingContext objects trigger
         // allocation attempts (first one wins) or when we see the
-        // update of the PodScheduling object.
+        // update of the PodSchedulingContext object.
         logger.V(5).Info("Claim already allocated, nothing to do")
         return nil
     }
@@ -601,19 +601,19 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim
     }, nil
 }
 
-// syncClaim determines which next action may be needed for a PodScheduling object
+// syncPodSchedulingContext determines which next action may be needed for a PodSchedulingContext object
 // and does it.
-func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *resourcev1alpha2.PodScheduling) error {
+func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
     logger := klog.FromContext(ctx)
 
     // Ignore deleted objects.
-    if podScheduling.DeletionTimestamp != nil {
-        logger.V(5).Info("PodScheduling marked for deletion")
+    if schedulingCtx.DeletionTimestamp != nil {
+        logger.V(5).Info("PodSchedulingContext marked for deletion")
         return nil
     }
 
-    if podScheduling.Spec.SelectedNode == "" &&
-        len(podScheduling.Spec.PotentialNodes) == 0 {
+    if schedulingCtx.Spec.SelectedNode == "" &&
+        len(schedulingCtx.Spec.PotentialNodes) == 0 {
         // Nothing to do? Shouldn't occur.
         logger.V(5).Info("Waiting for scheduler to set fields")
         return nil
@@ -621,8 +621,8 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
 
     // Check pod.
     // TODO (?): use an informer - only useful when many (most?) pods have claims
-    // TODO (?): let the scheduler copy all claim names + UIDs into PodScheduling - then we don't need the pod
-    pod, err := ctrl.kubeClient.CoreV1().Pods(podScheduling.Namespace).Get(ctx, podScheduling.Name, metav1.GetOptions{})
+    // TODO (?): let the scheduler copy all claim names + UIDs into PodSchedulingContext - then we don't need the pod
+    pod, err := ctrl.kubeClient.CoreV1().Pods(schedulingCtx.Namespace).Get(ctx, schedulingCtx.Name, metav1.GetOptions{})
     if err != nil {
         return err
     }
@@ -632,16 +632,16 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
     }
 
     // Still the owner?
-    if !metav1.IsControlledBy(podScheduling, pod) {
+    if !metav1.IsControlledBy(schedulingCtx, pod) {
         // Must be obsolete object, do nothing for it.
-        logger.V(5).Info("Pod not owner, PodScheduling is obsolete")
+        logger.V(5).Info("Pod not owner, PodSchedulingContext is obsolete")
         return nil
     }
 
     // Find all pending claims that are owned by us. We bail out if any of the pre-requisites
     // for pod scheduling (claims exist, classes exist, parameters exist) are not met.
     // The scheduler will do the same, except for checking parameters, so usually
-    // everything should be ready once the PodScheduling object exists.
+    // everything should be ready once the PodSchedulingContext object exists.
     var claims claimAllocations
     for _, podClaim := range pod.Spec.ResourceClaims {
         delayed, err := ctrl.checkPodClaim(ctx, pod, podClaim)
@@ -665,12 +665,12 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
     // and shouldn't, because those allocations might have to be undone to
     // pick a better node. If we don't need to allocate now, then we'll
     // simply report back the gather information.
-    if len(podScheduling.Spec.PotentialNodes) > 0 {
-        if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, podScheduling.Spec.PotentialNodes); err != nil {
+    if len(schedulingCtx.Spec.PotentialNodes) > 0 {
+        if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, schedulingCtx.Spec.PotentialNodes); err != nil {
             return fmt.Errorf("checking potential nodes: %v", err)
         }
     }
-    selectedNode := podScheduling.Spec.SelectedNode
+    selectedNode := schedulingCtx.Spec.SelectedNode
     logger.V(5).Info("pending pod claims", "claims", claims, "selectedNode", selectedNode)
     if selectedNode != "" {
         unsuitable := false
@@ -703,26 +703,26 @@ func (ctrl *controller) syncPodScheduling(ctx context.Context, podScheduling *re
     // TODO: replace with patching the array. We can do that without race conditions
     // because each driver is responsible for its own entries.
     modified := false
-    podScheduling = podScheduling.DeepCopy()
+    schedulingCtx = schedulingCtx.DeepCopy()
     for _, delayed := range claims {
-        i := findClaim(podScheduling.Status.ResourceClaims, delayed.PodClaimName)
+        i := findClaim(schedulingCtx.Status.ResourceClaims, delayed.PodClaimName)
         if i < 0 {
             // Add new entry.
-            podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims,
+            schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
                 resourcev1alpha2.ResourceClaimSchedulingStatus{
                     Name: delayed.PodClaimName,
                     UnsuitableNodes: delayed.UnsuitableNodes,
                 })
             modified = true
-        } else if stringsDiffer(podScheduling.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) {
+        } else if stringsDiffer(schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) {
             // Update existing entry.
-            podScheduling.Status.ResourceClaims[i].UnsuitableNodes = delayed.UnsuitableNodes
+            schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes = delayed.UnsuitableNodes
             modified = true
         }
     }
     if modified {
-        logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podScheduling", podScheduling)
-        if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulings(podScheduling.Namespace).UpdateStatus(ctx, podScheduling, metav1.UpdateOptions{}); err != nil {
+        logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podSchedulingCtx", schedulingCtx)
+        if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
            return fmt.Errorf("update unsuitable node status: %v", err)
         }
     }
@@ -55,10 +55,10 @@ func TestController(t *testing.T) {
     delayedClaim := claim.DeepCopy()
     delayedClaim.Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer
     podName := "pod"
-    podKey := "podscheduling:default/pod"
+    podKey := "schedulingCtx:default/pod"
     pod := createPod(podName, claimNamespace, nil)
     podClaimName := "my-pod-claim"
-    podScheduling := createPodScheduling(pod)
+    podSchedulingCtx := createPodSchedulingContexts(pod)
     podWithClaim := createPod(podName, claimNamespace, map[string]string{podClaimName: claimName})
     nodeName := "worker"
     otherNodeName := "worker-2"
@@ -96,22 +96,22 @@ func TestController(t *testing.T) {
         claim.Status.DeallocationRequested = true
         return claim
     }
-    withSelectedNode := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
-        podScheduling = podScheduling.DeepCopy()
-        podScheduling.Spec.SelectedNode = nodeName
-        return podScheduling
+    withSelectedNode := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
+        podSchedulingCtx = podSchedulingCtx.DeepCopy()
+        podSchedulingCtx.Spec.SelectedNode = nodeName
+        return podSchedulingCtx
     }
-    withUnsuitableNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
-        podScheduling = podScheduling.DeepCopy()
-        podScheduling.Status.ResourceClaims = append(podScheduling.Status.ResourceClaims,
+    withUnsuitableNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
+        podSchedulingCtx = podSchedulingCtx.DeepCopy()
+        podSchedulingCtx.Status.ResourceClaims = append(podSchedulingCtx.Status.ResourceClaims,
             resourcev1alpha2.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes},
         )
-        return podScheduling
+        return podSchedulingCtx
     }
-    withPotentialNodes := func(podScheduling *resourcev1alpha2.PodScheduling) *resourcev1alpha2.PodScheduling {
-        podScheduling = podScheduling.DeepCopy()
-        podScheduling.Spec.PotentialNodes = potentialNodes
-        return podScheduling
+    withPotentialNodes := func(podSchedulingCtx *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
+        podSchedulingCtx = podSchedulingCtx.DeepCopy()
+        podSchedulingCtx.Spec.PotentialNodes = potentialNodes
+        return podSchedulingCtx
     }
 
     var m mockDriver
@@ -121,7 +121,7 @@ func TestController(t *testing.T) {
         driver mockDriver
         classes []*resourcev1alpha2.ResourceClass
         pod *corev1.Pod
-        podScheduling, expectedPodScheduling *resourcev1alpha2.PodScheduling
+        schedulingCtx, expectedSchedulingCtx *resourcev1alpha2.PodSchedulingContext
         claim, expectedClaim *resourcev1alpha2.ResourceClaim
         expectedError string
     }{
@@ -308,8 +308,8 @@ func TestController(t *testing.T) {
         "pod-nop": {
             key: podKey,
             pod: pod,
-            podScheduling: withSelectedNode(podScheduling),
-            expectedPodScheduling: withSelectedNode(podScheduling),
+            schedulingCtx: withSelectedNode(podSchedulingCtx),
+            expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
             expectedError: errPeriodic.Error(),
         },
 
@@ -319,8 +319,8 @@ func TestController(t *testing.T) {
             claim: claim,
             expectedClaim: claim,
             pod: podWithClaim,
-            podScheduling: withSelectedNode(podScheduling),
-            expectedPodScheduling: withSelectedNode(podScheduling),
+            schedulingCtx: withSelectedNode(podSchedulingCtx),
+            expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
             expectedError: errPeriodic.Error(),
         },
 
@@ -331,8 +331,8 @@ func TestController(t *testing.T) {
             claim: delayedClaim,
             expectedClaim: delayedClaim,
             pod: podWithClaim,
-            podScheduling: podScheduling,
-            expectedPodScheduling: podScheduling,
+            schedulingCtx: podSchedulingCtx,
+            expectedSchedulingCtx: podSchedulingCtx,
         },
 
         // pod with delayed allocation, potential nodes -> provide unsuitable nodes
@@ -342,11 +342,11 @@ func TestController(t *testing.T) {
             claim: delayedClaim,
             expectedClaim: delayedClaim,
             pod: podWithClaim,
-            podScheduling: withPotentialNodes(podScheduling),
+            schedulingCtx: withPotentialNodes(podSchedulingCtx),
             driver: m.expectClassParameters(map[string]interface{}{className: 1}).
                 expectClaimParameters(map[string]interface{}{claimName: 2}).
                 expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil),
-            expectedPodScheduling: withUnsuitableNodes(withPotentialNodes(podScheduling)),
+            expectedSchedulingCtx: withUnsuitableNodes(withPotentialNodes(podSchedulingCtx)),
             expectedError: errPeriodic.Error(),
         },
 
@@ -356,8 +356,8 @@ func TestController(t *testing.T) {
             claim: delayedClaim,
             expectedClaim: delayedClaim,
             pod: podWithClaim,
-            podScheduling: withSelectedNode(withPotentialNodes(podScheduling)),
-            expectedPodScheduling: withSelectedNode(withPotentialNodes(podScheduling)),
+            schedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
+            expectedSchedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
             expectedError: `pod claim my-pod-claim: resourceclass.resource.k8s.io "mock-class" not found`,
         },
 
@@ -368,12 +368,12 @@ func TestController(t *testing.T) {
             claim: delayedClaim,
             expectedClaim: withReservedFor(withAllocate(delayedClaim), pod),
             pod: podWithClaim,
-            podScheduling: withSelectedNode(withPotentialNodes(podScheduling)),
+            schedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)),
             driver: m.expectClassParameters(map[string]interface{}{className: 1}).
                 expectClaimParameters(map[string]interface{}{claimName: 2}).
                 expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil).
                 expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}),
-            expectedPodScheduling: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podScheduling))),
+            expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))),
             expectedError: errPeriodic.Error(),
         },
     } {
@@ -388,8 +388,8 @@ func TestController(t *testing.T) {
             if test.pod != nil {
                 initialObjects = append(initialObjects, test.pod)
             }
-            if test.podScheduling != nil {
-                initialObjects = append(initialObjects, test.podScheduling)
+            if test.schedulingCtx != nil {
+                initialObjects = append(initialObjects, test.schedulingCtx)
             }
             if test.claim != nil {
                 initialObjects = append(initialObjects, test.claim)
@@ -398,7 +398,7 @@ func TestController(t *testing.T) {
             rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses()
             claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
             podInformer := informerFactory.Core().V1().Pods()
-            podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulings()
+            podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
             // Order is important: on function exit, we first must
             // cancel, then wait (last-in-first-out).
             defer informerFactory.Shutdown()
@@ -412,7 +412,7 @@ func TestController(t *testing.T) {
                 require.NoError(t, claimInformer.Informer().GetStore().Add(obj), "add resource claim")
             case *corev1.Pod:
                 require.NoError(t, podInformer.Informer().GetStore().Add(obj), "add pod")
-            case *resourcev1alpha2.PodScheduling:
+            case *resourcev1alpha2.PodSchedulingContext:
                 require.NoError(t, podSchedulingInformer.Informer().GetStore().Add(obj), "add pod scheduling")
             default:
                 t.Fatalf("unknown initialObject type: %+v", obj)
@@ -427,7 +427,7 @@ func TestController(t *testing.T) {
             if !cache.WaitForCacheSync(ctx.Done(),
                 informerFactory.Resource().V1alpha2().ResourceClasses().Informer().HasSynced,
                 informerFactory.Resource().V1alpha2().ResourceClaims().Informer().HasSynced,
-                informerFactory.Resource().V1alpha2().PodSchedulings().Informer().HasSynced,
+                informerFactory.Resource().V1alpha2().PodSchedulingContexts().Informer().HasSynced,
             ) {
                 t.Fatal("could not sync caches")
             }
@@ -449,11 +449,11 @@ func TestController(t *testing.T) {
             }
             assert.Equal(t, expectedClaims, claims.Items)
 
-            podSchedulings, err := kubeClient.ResourceV1alpha2().PodSchedulings("").List(ctx, metav1.ListOptions{})
+            podSchedulings, err := kubeClient.ResourceV1alpha2().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
             require.NoError(t, err, "list pod schedulings")
-            var expectedPodSchedulings []resourcev1alpha2.PodScheduling
-            if test.expectedPodScheduling != nil {
-                expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedPodScheduling)
+            var expectedPodSchedulings []resourcev1alpha2.PodSchedulingContext
+            if test.expectedSchedulingCtx != nil {
+                expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedSchedulingCtx)
             }
             assert.Equal(t, expectedPodSchedulings, podSchedulings.Items)
 
@@ -620,9 +620,9 @@ func createPod(podName, podNamespace string, claims map[string]string) *corev1.P
     return pod
 }
 
-func createPodScheduling(pod *corev1.Pod) *resourcev1alpha2.PodScheduling {
+func createPodSchedulingContexts(pod *corev1.Pod) *resourcev1alpha2.PodSchedulingContext {
     controller := true
-    return &resourcev1alpha2.PodScheduling{
+    return &resourcev1alpha2.PodSchedulingContext{
         ObjectMeta: metav1.ObjectMeta{
             Name: pod.Name,
             Namespace: pod.Namespace,