Enforce ReadWriteOncePod access mode during scheduling
Check the PVC ref counts in the node info cache to determine whether a pod's PVCs are already in use. If they are, and the pod is requesting ReadWriteOncePod access, fail the scheduling request.
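The diff below only adds tests for this behavior; the enforcement itself lives in the plugin under test (whose New constructor and ErrReasonReadWriteOncePodConflict the tests reference). As a rough, hypothetical sketch of the check described above (the helper name, the explicit lister parameter, and the "namespace/name" key format are assumptions, not code from this commit), the logic amounts to: for every PVC-backed volume on the incoming pod, look up the claim, and if it requests ReadWriteOncePod while any NodeInfo in the scheduler cache already counts a reference to it, return an UnschedulableAndUnresolvable status.

// Hypothetical sketch only: illustrates the PVC ref-count check described in the
// commit message. Assumes the plugin package's imports: v1 "k8s.io/api/core/v1",
// corelisters "k8s.io/client-go/listers/core/v1", and the scheduler framework package.
func readWriteOncePodConflict(pod *v1.Pod, pvcLister corelisters.PersistentVolumeClaimLister, nodeInfos []*framework.NodeInfo) *framework.Status {
	for _, vol := range pod.Spec.Volumes {
		if vol.PersistentVolumeClaim == nil {
			continue
		}
		pvc, err := pvcLister.PersistentVolumeClaims(pod.Namespace).Get(vol.PersistentVolumeClaim.ClaimName)
		if err != nil {
			// A missing claim is unresolvable (compare the "failed to get PVC" test case).
			return framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error())
		}
		// Only claims that request ReadWriteOncePod are restricted to a single pod.
		rwop := false
		for _, mode := range pvc.Spec.AccessModes {
			if mode == v1.ReadWriteOncePod {
				rwop = true
			}
		}
		if !rwop {
			continue
		}
		// NodeInfo.PVCRefCounts tracks how many scheduled pods reference each claim;
		// a nonzero count anywhere means the ReadWriteOncePod claim is already in use.
		// The "namespace/name" key format used here is an assumption.
		key := pod.Namespace + "/" + vol.PersistentVolumeClaim.ClaimName
		for _, nodeInfo := range nodeInfos {
			if nodeInfo.PVCRefCounts[key] > 0 {
				return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonReadWriteOncePodConflict)
			}
		}
	}
	return nil
}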
@@ -22,7 +22,16 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
+	plugintesting "k8s.io/kubernetes/pkg/scheduler/framework/plugins/testing"
+	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestGCEDiskConflicts(t *testing.T) {
@@ -64,7 +73,7 @@ func TestGCEDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			p, _ := New(nil, nil)
+			p := newPlugin(t)
 			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
 			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
@@ -112,7 +121,7 @@ func TestAWSDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			p, _ := New(nil, nil)
+			p := newPlugin(t)
 			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
 			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
@@ -166,7 +175,7 @@ func TestRBDDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			p, _ := New(nil, nil)
+			p := newPlugin(t)
 			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
 			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
@@ -220,7 +229,7 @@ func TestISCSIDiskConflicts(t *testing.T) {

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			p, _ := New(nil, nil)
+			p := newPlugin(t)
 			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
 			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
@@ -228,3 +237,150 @@ func TestISCSIDiskConflicts(t *testing.T) {
 		})
 	}
 }
+
+func TestAccessModeConflicts(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, true)()
+
+	podWithReadWriteOncePodPVC := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			// Required for querying lister for PVCs in the same namespace.
+			Namespace: "default",
+			Name:      "pod-with-rwop",
+		},
+		Spec: v1.PodSpec{
+			NodeName: "node-1",
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "claim-with-rwop",
+						},
+					},
+				},
+			},
+		},
+	}
+	podWithReadWriteManyPVC := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			// Required for querying lister for PVCs in the same namespace.
+			Namespace: "default",
+			Name:      "pod-with-rwx",
+		},
+		Spec: v1.PodSpec{
+			NodeName: "node-1",
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "claim-with-rwx",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "node-1",
+		},
+	}
+
+	readWriteOncePodPVC := &v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "claim-with-rwop",
+		},
+		Spec: v1.PersistentVolumeClaimSpec{
+			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod},
+		},
+	}
+	readWriteManyPVC := &v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "claim-with-rwx",
+		},
+		Spec: v1.PersistentVolumeClaimSpec{
+			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
+		},
+	}
+
+	tests := []struct {
+		name                   string
+		pod                    *v1.Pod
+		existingPods           []*v1.Pod
+		existingNodes          []*v1.Node
+		existingPVCs           []*v1.PersistentVolumeClaim
+		enableReadWriteOncePod bool
+		wantStatus             *framework.Status
+	}{
+		{
+			name:                   "nothing",
+			pod:                    &v1.Pod{},
+			existingPods:           []*v1.Pod{},
+			existingNodes:          []*v1.Node{},
+			existingPVCs:           []*v1.PersistentVolumeClaim{},
+			enableReadWriteOncePod: true,
+			wantStatus:             nil,
+		},
+		{
+			name:                   "failed to get PVC",
+			pod:                    podWithReadWriteOncePodPVC,
+			existingPods:           []*v1.Pod{},
+			existingNodes:          []*v1.Node{},
+			existingPVCs:           []*v1.PersistentVolumeClaim{},
+			enableReadWriteOncePod: true,
+			wantStatus:             framework.NewStatus(framework.UnschedulableAndUnresolvable, "persistentvolumeclaim \"claim-with-rwop\" not found"),
+		},
+		{
+			name:                   "no access mode conflict",
+			pod:                    podWithReadWriteOncePodPVC,
+			existingPods:           []*v1.Pod{podWithReadWriteManyPVC},
+			existingNodes:          []*v1.Node{node},
+			existingPVCs:           []*v1.PersistentVolumeClaim{readWriteOncePodPVC, readWriteManyPVC},
+			enableReadWriteOncePod: true,
+			wantStatus:             nil,
+		},
+		{
+			name:                   "access mode conflict",
+			pod:                    podWithReadWriteOncePodPVC,
+			existingPods:           []*v1.Pod{podWithReadWriteOncePodPVC, podWithReadWriteManyPVC},
+			existingNodes:          []*v1.Node{node},
+			existingPVCs:           []*v1.PersistentVolumeClaim{readWriteOncePodPVC, readWriteManyPVC},
+			enableReadWriteOncePod: true,
+			wantStatus:             framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonReadWriteOncePodConflict),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			p := newPluginWithListers(t, test.existingPods, test.existingNodes, test.existingPVCs, test.enableReadWriteOncePod)
+			gotStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), nil, test.pod)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %+v, want: %+v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
+
+func newPlugin(t *testing.T) framework.Plugin {
+	return newPluginWithListers(t, nil, nil, nil, true)
+}
+
+func newPluginWithListers(t *testing.T, pods []*v1.Pod, nodes []*v1.Node, pvcs []*v1.PersistentVolumeClaim, enableReadWriteOncePod bool) framework.Plugin {
+	ctx := context.Background()
+	pluginFactory := func(plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) {
+		return New(plArgs, fh, feature.Features{
+			EnableReadWriteOncePod: enableReadWriteOncePod,
+		})
+	}
+	snapshot := cache.NewSnapshot(pods, nodes)
+
+	objects := make([]runtime.Object, 0, len(pvcs))
+	for _, pvc := range pvcs {
+		objects = append(objects, pvc)
+	}
+
+	return plugintesting.SetupPluginWithInformers(ctx, t, pluginFactory, &config.InterPodAffinityArgs{}, snapshot, objects)
+}