scheduler: publish PodSchedulingContext during PreBind
Blocking API calls during a scheduling cycle, like the DRA plugin is doing, slow down overall scheduling; they also affect pods which don't use DRA. It is easy to move the blocking calls into a goroutine while the scheduling cycle ends with "pod unschedulable". The hard part is handling an error when those API calls then fail in the background. There is a solution for that (see https://github.com/kubernetes/kubernetes/pull/120963), but it's complex. Instead, publishing the modified PodSchedulingContext can also be done later. In the more common case of a pod which is ready for binding except for its claims, that'll be in PreBind, which runs in a separate goroutine already. In the less common case that a pod cannot be scheduled, that'll be in Unreserve, which is still blocking.
This commit is contained in:
@@ -351,8 +351,8 @@ func TestPlugin(t *testing.T) {
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
reserve: result{
status: framework.NewStatus(framework.Pending, `waiting for resource driver to allocate resource`),
prebind: result{
status: framework.NewStatus(framework.Pending, `waiting for resource driver`),
added: []metav1.Object{schedulingSelectedPotential},
},
},
@@ -365,8 +365,8 @@ func TestPlugin(t *testing.T) {
claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim, pendingDelayedClaim2},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
reserve: result{
status: framework.NewStatus(framework.Pending, `waiting for resource driver to provide information`),
prebind: result{
status: framework.NewStatus(framework.Pending, `waiting for resource driver`),
added: []metav1.Object{schedulingPotential},
},
},
@@ -379,8 +379,8 @@ func TestPlugin(t *testing.T) {
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
reserve: result{
status: framework.NewStatus(framework.Pending, `waiting for resource driver to allocate resource`),
prebind: result{
status: framework.NewStatus(framework.Pending, `waiting for resource driver`),
changes: change{
scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
return st.FromPodSchedulingContexts(in).
@@ -399,7 +399,7 @@ func TestPlugin(t *testing.T) {
schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
prepare: prepare{
reserve: change{
prebind: change{
scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
// This does not actually conflict with setting the
// selected node, but because the plugin is not using
@@ -411,7 +411,7 @@ func TestPlugin(t *testing.T) {
},
},
want: want{
reserve: result{
prebind: result{
status: framework.AsStatus(errors.New(`ResourceVersion must match the object that gets updated`)),
},
},
Reference in New Issue
Block a user