Scheduler now registers event handlers dynamically
- move clusterEventMap to Configurator
- dynamic event handler registration for core API resources
- dynamic event handler registration for custom resources
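In practice this means a scheduler plugin can declare interest in a custom resource through its EventsToRegister, and the scheduler wires up an event handler for that resource dynamically. A minimal sketch, mirroring the fakeCRPlugin added in the test below (the plugin name fooPlugin is illustrative):

	func (p *fooPlugin) EventsToRegister() []framework.ClusterEvent {
		// "foos.v1.example.com" names the custom resource as resource.version.group
		// (plural "foos", version "v1", group "example.com" in the test's Foo CRD).
		// An event on any Foo object may move Pods rejected by this plugin back to activeQ.
		return []framework.ClusterEvent{
			{Resource: "foos.v1.example.com", ActionType: framework.All},
		}
	}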
@@ -17,18 +17,31 @@ limitations under the License.
package scheduler

import (
	"context"
	"fmt"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/pkg/scheduler"
	schedapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	testfwk "k8s.io/kubernetes/test/integration/framework"
	testutils "k8s.io/kubernetes/test/integration/util"
	imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -148,3 +161,174 @@ func TestServiceAffinityEnqueue(t *testing.T) {
		t.Errorf("Expected the Pod to be attempted 2 times, but got %v", podInfo.Attempts)
	}
}

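// Compile-time assertions that fakeCRPlugin satisfies the FilterPlugin and
// EnqueueExtensions interfaces.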
var _ framework.FilterPlugin = &fakeCRPlugin{}
var _ framework.EnqueueExtensions = &fakeCRPlugin{}

type fakeCRPlugin struct{}

func (f *fakeCRPlugin) Name() string {
	return "fakeCRPlugin"
}

func (f *fakeCRPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
	return framework.NewStatus(framework.Unschedulable, "always fail")
}

// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (f *fakeCRPlugin) EventsToRegister() []framework.ClusterEvent {
	return []framework.ClusterEvent{
		{Resource: "foos.v1.example.com", ActionType: framework.All},
	}
}

// TestCustomResourceEnqueue constructs a fake plugin that registers custom resources
// to verify Pods failed by this plugin can be moved properly upon CR events.
func TestCustomResourceEnqueue(t *testing.T) {
	// Start API Server with apiextensions supported.
	server := apiservertesting.StartTestServerOrDie(
		t, apiservertesting.NewDefaultTestServerOptions(),
		[]string{"--disable-admission-plugins=ServiceAccount,TaintNodesByCondition", "--runtime-config=api/all=true"},
		testfwk.SharedEtcd(),
	)
	testCtx := &testutils.TestContext{}
	testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background())
	testCtx.CloseFn = func() { server.TearDownFn() }

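	// Client for managing CRDs, plus a dynamic client for creating Foo objects later on.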
	apiExtensionClient := apiextensionsclient.NewForConfigOrDie(server.ClientConfig)
	dynamicClient := dynamic.NewForConfigOrDie(server.ClientConfig)

	// Create a Foo CRD.
	fooCRD := &apiextensionsv1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foos.example.com",
		},
		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
			Group: "example.com",
			Scope: apiextensionsv1.NamespaceScoped,
			Names: apiextensionsv1.CustomResourceDefinitionNames{
				Plural: "foos",
				Kind:   "Foo",
			},
			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
				{
					Name:    "v1",
					Served:  true,
					Storage: true,
					Schema: &apiextensionsv1.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
							Type: "object",
							Properties: map[string]apiextensionsv1.JSONSchemaProps{
								"field": {Type: "string"},
							},
						},
					},
				},
			},
		},
	}
	var err error
	fooCRD, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(testCtx.Ctx, fooCRD, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}

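	// Register the out-of-tree fakeCRPlugin and enable it as a Filter plugin in a dedicated profile.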
	registry := frameworkruntime.Registry{
		"fakeCRPlugin": func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
			return &fakeCRPlugin{}, nil
		},
	}
	profile := schedapi.KubeSchedulerProfile{
		SchedulerName: v1.DefaultSchedulerName,
		Plugins: &schedapi.Plugins{
			Filter: schedapi.PluginSet{
				Enabled: []schedapi.Plugin{
					{Name: "fakeCRPlugin"},
				},
			},
		},
	}

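	// Point the test context at the test API server and create a namespace for this test.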
	testCtx.KubeConfig = server.ClientConfig
	testCtx.ClientSet = kubernetes.NewForConfigOrDie(server.ClientConfig)
	testCtx.NS, err = testCtx.ClientSet.CoreV1().Namespaces().Create(testCtx.Ctx, &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("cr-enqueue-%v", string(uuid.NewUUID()))}}, metav1.CreateOptions{})
	if err != nil && !errors.IsAlreadyExists(err) {
		t.Fatalf("Failed to create integration test ns: %v", err)
	}

	// Use zero backoff seconds to bypass backoffQ.
	testCtx = testutils.InitTestSchedulerWithOptions(
		t,
		testCtx,
		nil,
		scheduler.WithProfiles(profile),
		scheduler.WithFrameworkOutOfTreeRegistry(registry),
		scheduler.WithPodInitialBackoffSeconds(0),
		scheduler.WithPodMaxBackoffSeconds(0),
	)
	testutils.SyncInformerFactory(testCtx)
	// It's intended to not start the scheduler's queue, and hence to
	// not start any flushing logic. We will pop and schedule the Pods manually later.
	defer testutils.CleanupTest(t, testCtx)

	cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
	// Create one Node.
	node := st.MakeNode().Name("fake-node").Obj()
	if _, err := cs.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create Node %q: %v", node.Name, err)
	}

	// Create a testing Pod.
	pause := imageutils.GetPauseImageName()
	pod := st.MakePod().Namespace(ns).Name("fake-pod").Container(pause).Obj()
	if _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
	}

	// Wait for the testing Pod to be present in the scheduling queue.
	if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
		return len(testCtx.Scheduler.SchedulingQueue.PendingPods()) == 1, nil
	}); err != nil {
		t.Fatal(err)
	}

	// Pop fake-pod out. It should be unschedulable.
	podInfo := testCtx.Scheduler.NextPod()
	fwk, ok := testCtx.Scheduler.Profiles[podInfo.Pod.Spec.SchedulerName]
	if !ok {
		t.Fatalf("Cannot find the profile for Pod %v", podInfo.Pod.Name)
	}
	// Schedule the Pod manually.
	_, fitError := testCtx.Scheduler.Algorithm.Schedule(ctx, fwk, framework.NewCycleState(), podInfo.Pod)
	// The fitError is expected to be non-nil as it failed the fakeCRPlugin plugin.
	if fitError == nil {
		t.Fatalf("Expect Pod %v to fail at scheduling.", podInfo.Pod.Name)
	}
	testCtx.Scheduler.Error(podInfo, fitError)

	// Scheduling cycle is incremented from 0 to 1 after NextPod() is called, so
	// pass a number larger than 1 to move Pod to unschedulableQ.
	testCtx.Scheduler.SchedulingQueue.AddUnschedulableIfNotPresent(podInfo, 10)

	// Trigger a Custom Resource event.
	// We expect this event to trigger moving the test Pod from unschedulableQ to activeQ.
	crdGVR := schema.GroupVersionResource{Group: fooCRD.Spec.Group, Version: fooCRD.Spec.Versions[0].Name, Resource: "foos"}
	crClient := dynamicClient.Resource(crdGVR).Namespace(ns)
	if _, err := crClient.Create(ctx, &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "example.com/v1",
			"kind":       "Foo",
			"metadata":   map[string]interface{}{"name": "foo1"},
		},
	}, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Unable to create cr: %v", err)
	}

	// Now we should be able to pop the Pod from activeQ again.
	podInfo = testCtx.Scheduler.NextPod()
	if podInfo.Attempts != 2 {
		t.Errorf("Expected the Pod to be attempted 2 times, but got %v", podInfo.Attempts)
	}
}