volume scheduler: move reason strings into volume code

The scheduler doesn't really need to know in detail which reasons
rendered a node unusable for a pod. All it needs from the volume
binder is a list of reasons that it can then present to the user.

This seems a bit cleaner. But the main reason for the change is that
it simplifies the checking of CSI inline volumes and, perhaps later,
capacity checking. Both will lead to new failure reasons, which can
then be added without changing the interface.
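To make the shape of the change concrete, here is a minimal, self-contained Go sketch. It uses simplified stand-in types and helpers (Status, NewStatus, findPodVolumes, filter) rather than the real framework and volumescheduling packages; only the reason strings and the overall pattern (the binder returns a list of reasons, the plugin just forwards them) mirror the diff below.

package main

import (
	"fmt"
	"strings"
)

// Reason strings now owned by the volume binder side (as in the diff below).
const (
	ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
	ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
)

// Status is a simplified stand-in for framework.Status: a code plus a list
// of human-readable reasons.
type Status struct {
	Code    string
	Reasons []string
}

func NewStatus(code string, reasons ...string) *Status {
	return &Status{Code: code, Reasons: reasons}
}

func (s *Status) AppendReason(reason string) {
	s.Reasons = append(s.Reasons, reason)
}

// findPodVolumes stands in for the binder's FindPodVolumes after the change:
// instead of (unboundSatisfied, boundSatisfied bool) it returns the list of
// conflict reasons for the node, empty when the node fits.
func findPodVolumes(boundSatisfied, unboundSatisfied bool) []string {
	var reasons []string
	if !boundSatisfied {
		reasons = append(reasons, ErrReasonNodeConflict)
	}
	if !unboundSatisfied {
		reasons = append(reasons, ErrReasonBindConflict)
	}
	return reasons
}

// filter mirrors the simplified Filter plugin: it no longer interprets
// individual failure modes, it only forwards whatever reasons it got.
func filter(reasons []string) *Status {
	if len(reasons) == 0 {
		return nil // node is usable for the pod
	}
	status := NewStatus("UnschedulableAndUnresolvable")
	for _, reason := range reasons {
		status.AppendReason(reason)
	}
	return status
}

func main() {
	// Bound PVs violate node affinity and unbound PVCs cannot be matched:
	// both reasons are reported.
	if st := filter(findPodVolumes(false, false)); st != nil {
		fmt.Printf("%s: %s\n", st.Code, strings.Join(st.Reasons, "; "))
	}
	// No conflicts: the node passes the filter.
	fmt.Println("fits:", filter(findPodVolumes(true, true)) == nil)
}

Because the plugin no longer enumerates failure modes itself, a new reason (for example, one produced while checking CSI inline volumes) only has to be added on the binder side.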
Patrick Ohly
2020-02-14 13:40:29 +01:00
parent c73532c4f7
commit 6eb0b034ac
7 changed files with 174 additions and 216 deletions


@@ -36,13 +36,6 @@ var _ framework.FilterPlugin = &VolumeBinding{}
 // Name is the name of the plugin used in Registry and configurations.
 const Name = "VolumeBinding"
 
-const (
-	// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
-	ErrReasonBindConflict = "node(s) didn't find available persistent volumes to bind"
-	// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
-	ErrReasonNodeConflict = "node(s) had volume node affinity conflict"
-)
-
 // Name returns name of the plugin. It is used in logs, etc.
 func (pl *VolumeBinding) Name() string {
 	return Name
@@ -79,19 +72,16 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
 		return nil
 	}
-	unboundSatisfied, boundSatisfied, err := pl.binder.Binder.FindPodVolumes(pod, node)
+	reasons, err := pl.binder.Binder.FindPodVolumes(pod, node)
 	if err != nil {
 		return framework.NewStatus(framework.Error, err.Error())
 	}
-	if !boundSatisfied || !unboundSatisfied {
+	if len(reasons) > 0 {
 		status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
-		if !boundSatisfied {
-			status.AppendReason(ErrReasonNodeConflict)
-		}
-		if !unboundSatisfied {
-			status.AppendReason(ErrReasonBindConflict)
+		for _, reason := range reasons {
+			status.AppendReason(reason)
 		}
 		return status
 	}


@@ -58,9 +58,7 @@ func TestVolumeBinding(t *testing.T) {
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				AllBound:             true,
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
+				AllBound: true,
 			},
 			wantStatus: nil,
 		},
@@ -69,31 +67,25 @@ func TestVolumeBinding(t *testing.T) {
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   true,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict},
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonBindConflict),
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict),
 		},
 		{
 			name: "bound and unbound unsatisfied",
 			pod:  &v1.Pod{Spec: volState},
 			node: &v1.Node{},
 			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: false,
-				FindBoundSatsified:   false,
+				FindReasons: []string{volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict},
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNodeConflict,
-				ErrReasonBindConflict),
+			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, volumescheduling.ErrReasonBindConflict, volumescheduling.ErrReasonNodeConflict),
 		},
 		{
-			name: "unbound/found matches/bind succeeds",
-			pod:  &v1.Pod{Spec: volState},
-			node: &v1.Node{},
-			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{
-				FindUnboundSatsified: true,
-				FindBoundSatsified:   true,
-			},
-			wantStatus: nil,
+			name:               "unbound/found matches/bind succeeds",
+			pod:                &v1.Pod{Spec: volState},
+			node:               &v1.Node{},
+			volumeBinderConfig: &volumescheduling.FakeVolumeBinderConfig{},
+			wantStatus:         nil,
 		},
 		{
 			name: "predicate error",