remove scheduler component config v1beta1

Signed-off-by: kerthcet <kerthcet@gmail.com>
Author: kerthcet
Date: 2021-09-28 13:13:17 +08:00
parent 6a71f85ccf
commit 75a255d2ed
28 changed files with 12 additions and 5683 deletions
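With the v1beta1 component config removed, the test below is deleted rather than converted: it expressed its ServiceAffinityArgs through v1beta1-only types, and its imports of both the v1beta1 package and the serviceaffinity plugin go away with it. The tests that remain in this file build their profiles against the v1beta2 API instead. The snippet below is a rough sketch of that pattern, not the committed code; it assumes the v1beta2 conversion helper configtesting.V1beta2ToInternalWithDefaults and uses a placeholder plugin name.

// A minimal sketch, assuming the v1beta2 conversion helper; "fake-cr-plugin"
// is a placeholder for whatever out-of-tree plugin a test registers.
cfg := configtesting.V1beta2ToInternalWithDefaults(t, v1beta2.KubeSchedulerConfiguration{
	Profiles: []v1beta2.KubeSchedulerProfile{{
		SchedulerName: pointer.StringPtr(v1.DefaultSchedulerName),
		Plugins: &v1beta2.Plugins{
			// In v1beta2 the per-extension-point sets are value fields,
			// not pointers as they were in v1beta1.
			Filter: v1beta2.PluginSet{
				Enabled: []v1beta2.Plugin{
					{Name: "fake-cr-plugin"},
				},
			},
		},
	}},
})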

@@ -34,13 +34,11 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/kube-scheduler/config/v1beta1"
"k8s.io/kube-scheduler/config/v1beta2"
apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/scheduler"
configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
st "k8s.io/kubernetes/pkg/scheduler/testing"
testfwk "k8s.io/kubernetes/test/integration/framework"
@@ -49,126 +47,7 @@ import (
"k8s.io/utils/pointer"
)
func TestServiceAffinityEnqueue(t *testing.T) {
	cfg := configtesting.V1beta1ToInternalWithDefaults(t, v1beta1.KubeSchedulerConfiguration{
		Profiles: []v1beta1.KubeSchedulerProfile{{
			SchedulerName: pointer.StringPtr(v1.DefaultSchedulerName),
			Plugins: &v1beta1.Plugins{
				PreFilter: &v1beta1.PluginSet{
					Enabled: []v1beta1.Plugin{
						{Name: serviceaffinity.Name},
					},
				},
				Filter: &v1beta1.PluginSet{
					Enabled: []v1beta1.Plugin{
						{Name: serviceaffinity.Name},
					},
				},
			},
			PluginConfig: []v1beta1.PluginConfig{
				{
					Name: serviceaffinity.Name,
					Args: runtime.RawExtension{
						Object: &v1beta1.ServiceAffinityArgs{
							AffinityLabels: []string{"hostname"},
						},
					},
				},
			},
		}},
	})
	// Use zero backoff seconds to bypass backoffQ.
	testCtx := testutils.InitTestSchedulerWithOptions(
		t,
		testutils.InitTestAPIServer(t, "serviceaffinity-enqueue", nil),
		nil,
		scheduler.WithProfiles(cfg.Profiles...),
		scheduler.WithPodInitialBackoffSeconds(0),
		scheduler.WithPodMaxBackoffSeconds(0),
	)
	testutils.SyncInformerFactory(testCtx)
	// It's intended to not start the scheduler's queue, and hence to
	// not start any flushing logic. We will pop and schedule the Pods manually later.
	defer testutils.CleanupTest(t, testCtx)
	cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
	// Create two Nodes.
	for i := 1; i <= 2; i++ {
		nodeName := fmt.Sprintf("node%d", i)
		capacity := map[v1.ResourceName]string{v1.ResourcePods: "1"}
		node := st.MakeNode().Name(nodeName).Label("hostname", nodeName).Capacity(capacity).Obj()
		if _, err := cs.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}); err != nil {
			t.Fatalf("Failed to create Node %q: %v", nodeName, err)
		}
	}
	// Create a Service.
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "svc",
		},
		Spec: v1.ServiceSpec{
			Ports:    []v1.ServicePort{{Port: int32(80)}},
			Selector: map[string]string{"foo": "bar"},
		},
	}
	if _, err := cs.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create Service %q: %v", svc.Name, err)
	}
	// Create two Pods.
	pause := imageutils.GetPauseImageName()
	for i := 1; i <= 2; i++ {
		podName := fmt.Sprintf("pod%d", i)
		pod := st.MakePod().Namespace(ns).Name(podName).Label("foo", "bar").Container(pause).Obj()
		// Make Pod1 an assigned Pod.
		if i == 1 {
			pod.Spec.NodeName = fmt.Sprintf("node%d", i)
		}
		if _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
			t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
		}
	}
	// Wait for pod2 to be present in the scheduling queue.
	if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
		return len(testCtx.Scheduler.SchedulingQueue.PendingPods()) == 1, nil
	}); err != nil {
		t.Fatal(err)
	}
	// Pop Pod2 out. It should be unschedulable.
	podInfo := nextPodOrDie(t, testCtx)
	fwk, ok := testCtx.Scheduler.Profiles[podInfo.Pod.Spec.SchedulerName]
	if !ok {
		t.Fatalf("Cannot find the profile for Pod %v", podInfo.Pod.Name)
	}
	// Schedule the Pod manually.
	_, fitError := testCtx.Scheduler.Algorithm.Schedule(ctx, nil, fwk, framework.NewCycleState(), podInfo.Pod)
	// The fitError is expected to be:
	// 0/2 nodes are available: 1 Too many pods, 1 node(s) didn't match service affinity.
	if fitError == nil {
		t.Fatalf("Expect Pod %v to fail at scheduling.", podInfo.Pod.Name)
	}
	testCtx.Scheduler.Error(podInfo, fitError)
	// Scheduling cycle is incremented from 0 to 1 after NextPod() is called, so
	// pass a number larger than 1 to move Pod to unschedulableQ.
	testCtx.Scheduler.SchedulingQueue.AddUnschedulableIfNotPresent(podInfo, 10)
	// Trigger a Service event.
	// We expect this event to trigger moving the test Pod from unschedulableQ to activeQ.
	if err := cs.CoreV1().Services(ns).Delete(ctx, "svc", metav1.DeleteOptions{}); err != nil {
		t.Fatalf("Failed to delete service 'svc': %v", err)
	}
	// Now we should be able to pop the Pod from activeQ again.
	podInfo = nextPodOrDie(t, testCtx)
	if podInfo.Attempts != 2 {
		t.Errorf("Expected the Pod to be attempted 2 times, but got %v", podInfo.Attempts)
	}
}
// TODO(#105303): Add a test case to cover event registration for core API resources
var _ framework.FilterPlugin = &fakeCRPlugin{}
var _ framework.EnqueueExtensions = &fakeCRPlugin{}
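For context on the assertions above: a plugin that satisfies both framework.FilterPlugin and framework.EnqueueExtensions in this version of the scheduler framework needs only the three methods sketched here. This is a hypothetical skeleton relying on the file's existing imports (context, v1, framework), not the fakeCRPlugin defined elsewhere in this file.

// Hypothetical skeleton, not the real fakeCRPlugin: the minimum surface needed
// to satisfy the two interfaces asserted above.
type crFilterSketch struct{}

// Name identifies the plugin in profiles and logs.
func (p *crFilterSketch) Name() string { return "crFilterSketch" }

// Filter rejects every Pod, which is how a test forces Pods into unschedulableQ.
func (p *crFilterSketch) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
	return framework.NewStatus(framework.Unschedulable, "always unschedulable")
}

// EventsToRegister declares which cluster events may make Pods rejected by this
// plugin schedulable again, so the queue can move them back to activeQ.
// The GVK string here is purely illustrative.
func (p *crFilterSketch) EventsToRegister() []framework.ClusterEvent {
	return []framework.ClusterEvent{
		{Resource: framework.GVK("foos.v1.example.com"), ActionType: framework.All},
	}
}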