diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 28aac59bf28..72646863cd3 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -480,11 +480,12 @@ func TestDeleteFinalStateUnknown(t *testing.T) { } func TestExpectationsOnRecreate(t *testing.T) { - client := fake.NewSimpleClientset() - stopCh := make(chan struct{}) - defer close(stopCh) - _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client := fake.NewSimpleClientset() + f := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) dsc, err := NewDaemonSetsController( ctx, @@ -550,8 +551,8 @@ func TestExpectationsOnRecreate(t *testing.T) { t.Fatal(err) } - f.Start(stopCh) - for ty, ok := range f.WaitForCacheSync(stopCh) { + f.Start(ctx.Done()) + for ty, ok := range f.WaitForCacheSync(ctx.Done()) { if !ok { t.Fatalf("caches failed to sync: %v", ty) } diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index 6c4d8e7fd21..a6e46eaa194 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -70,14 +70,18 @@ import ( "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/etcd" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) -func setup(t *testing.T, groupVersions ...schema.GroupVersion) (clientset.Interface, *restclient.Config, framework.TearDownFunc) { +func setup(t *testing.T, groupVersions ...schema.GroupVersion) (context.Context, clientset.Interface, *restclient.Config, framework.TearDownFunc) { return setupWithResources(t, groupVersions, nil) } -func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resources []schema.GroupVersionResource) (clientset.Interface, *restclient.Config, framework.TearDownFunc) { - return framework.StartTestServer(t, framework.TestServerSetup{ +func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resources []schema.GroupVersionResource) (context.Context, clientset.Interface, *restclient.Config, framework.TearDownFunc) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + + client, config, teardown := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { if len(groupVersions) > 0 || len(resources) > 0 { resourceConfig := controlplane.DefaultAPIResourceConfigSource() @@ -87,6 +91,13 @@ func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resou } }, }) + + newTeardown := func() { + cancel() + teardown() + } + + return ctx, client, config, newTeardown } func verifyStatusCode(t *testing.T, transport http.RoundTripper, verb, URL, body string, expectedStatusCode int) { @@ -147,7 +158,7 @@ var cascDel = ` ` func Test4xxStatusCodeInvalidPatch(t *testing.T) { - client, _, tearDownFn := setup(t) + ctx, client, _, tearDownFn := setup(t) defer tearDownFn() obj := []byte(`{ @@ -183,7 +194,7 @@ func Test4xxStatusCodeInvalidPatch(t *testing.T) { AbsPath("/apis/apps/v1"). Namespace("default"). Resource("deployments"). - Body(obj).Do(context.TODO()).Get() + Body(obj).Do(ctx).Get() if err != nil { t.Fatalf("Failed to create object: %v: %v", err, resp) } @@ -192,7 +203,7 @@ func Test4xxStatusCodeInvalidPatch(t *testing.T) { Namespace("default"). Resource("deployments"). Name("deployment"). 
- Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(context.TODO()) + Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(ctx) var statusCode int result.StatusCode(&statusCode) if statusCode != 422 { @@ -203,7 +214,7 @@ func Test4xxStatusCodeInvalidPatch(t *testing.T) { Namespace("default"). Resource("deployments"). Name("deployment"). - Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(context.TODO()) + Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(ctx) result.StatusCode(&statusCode) if statusCode != 422 { t.Fatalf("Expected status code to be 422, got %v (%#v)", statusCode, result) @@ -303,7 +314,7 @@ func TestHSTS(t *testing.T) { // Tests that the apiserver returns 202 status code as expected. func Test202StatusCode(t *testing.T) { - clientSet, kubeConfig, tearDownFn := setup(t) + ctx, clientSet, kubeConfig, tearDownFn := setup(t) defer tearDownFn() transport, err := restclient.TransportFor(kubeConfig) @@ -318,7 +329,7 @@ func Test202StatusCode(t *testing.T) { // 1. Create the resource without any finalizer and then delete it without setting DeleteOptions. // Verify that server returns 200 in this case. - rs, err := rsClient.Create(context.TODO(), newRS(ns.Name), metav1.CreateOptions{}) + rs, err := rsClient.Create(ctx, newRS(ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -328,7 +339,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the apiserver still returns 200 since DeleteOptions.OrphanDependents is not set. rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) + rs, err = rsClient.Create(ctx, rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -337,7 +348,7 @@ func Test202StatusCode(t *testing.T) { // 3. Create the resource and then delete it with DeleteOptions.OrphanDependents=false. // Verify that the server still returns 200 since the resource is immediately deleted. rs = newRS(ns.Name) - rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) + rs, err = rsClient.Create(ctx, rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -347,7 +358,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the server returns 202 in this case. rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) + rs, err = rsClient.Create(ctx, rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -363,12 +374,17 @@ var ( // TestListOptions ensures that list works as expected for valid and invalid combinations of limit, continue, // resourceVersion and resourceVersionMatch. 
func TestListOptions(t *testing.T) { + for _, watchCacheEnabled := range []bool{true, false} { t.Run(fmt.Sprintf("watchCacheEnabled=%t", watchCacheEnabled), func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() var storageTransport *storagebackend.TransportConfig - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.EnableWatchCache = watchCacheEnabled storageTransport = &opts.Etcd.StorageConfig.Transport @@ -604,11 +620,16 @@ func TestListResourceVersion0(t *testing.T) { watchCacheEnabled: false, }, } + for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.EnableWatchCache = tc.watchCacheEnabled }, @@ -623,7 +644,7 @@ func TestListResourceVersion0(t *testing.T) { for i := 0; i < 10; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -631,7 +652,7 @@ func TestListResourceVersion0(t *testing.T) { if tc.watchCacheEnabled { // poll until the watch cache has the full list in memory err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { - list, err := clientSet.AppsV1().ReplicaSets(ns.Name).List(context.Background(), metav1.ListOptions{ResourceVersion: "0"}) + list, err := clientSet.AppsV1().ReplicaSets(ns.Name).List(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -643,12 +664,12 @@ func TestListResourceVersion0(t *testing.T) { } pagerFn := func(opts metav1.ListOptions) (runtime.Object, error) { - return rsClient.List(context.TODO(), opts) + return rsClient.List(ctx, opts) } p := pager.New(pager.SimplePageFunc(pagerFn)) p.PageSize = 3 - listObj, _, err := p.List(context.Background(), metav1.ListOptions{ResourceVersion: "0"}) + listObj, _, err := p.List(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { t.Fatalf("Unexpected list error: %v", err) } @@ -665,7 +686,7 @@ func TestListResourceVersion0(t *testing.T) { func TestAPIListChunking(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientSet, "list-paging", t) @@ -676,7 +697,7 @@ func TestAPIListChunking(t *testing.T) { for i := 0; i < 4; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -687,7 +708,7 @@ 
func TestAPIListChunking(t *testing.T) { PageSize: 1, PageFn: pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { calls++ - list, err := rsClient.List(context.TODO(), opts) + list, err := rsClient.List(ctx, opts) if err != nil { return nil, err } @@ -697,14 +718,14 @@ func TestAPIListChunking(t *testing.T) { if calls == 2 { rs := newRS(ns.Name) rs.Name = "test-5" - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } return list, err }), } - listObj, _, err := p.List(context.Background(), metav1.ListOptions{}) + listObj, _, err := p.List(ctx, metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -733,7 +754,7 @@ func TestAPIListChunking(t *testing.T) { func TestAPIListChunkingWithLabelSelector(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientSet, "list-paging-with-label-selector", t) @@ -746,7 +767,7 @@ func TestAPIListChunkingWithLabelSelector(t *testing.T) { rs.Name = fmt.Sprintf("test-%d", i) odd := i%2 != 0 rs.Labels = map[string]string{"odd-index": strconv.FormatBool(odd)} - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -757,7 +778,7 @@ func TestAPIListChunkingWithLabelSelector(t *testing.T) { PageSize: 1, PageFn: pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { calls++ - list, err := rsClient.List(context.TODO(), opts) + list, err := rsClient.List(ctx, opts) if err != nil { return nil, err } @@ -767,7 +788,7 @@ func TestAPIListChunkingWithLabelSelector(t *testing.T) { return list, err }), } - listObj, _, err := p.List(context.Background(), metav1.ListOptions{LabelSelector: "odd-index=true", Limit: 3}) + listObj, _, err := p.List(ctx, metav1.ListOptions{LabelSelector: "odd-index=true", Limit: 3}) if err != nil { t.Fatal(err) } @@ -806,7 +827,7 @@ func makeSecret(name string) *v1.Secret { } func TestNameInFieldSelector(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() numNamespaces := 3 @@ -814,11 +835,11 @@ func TestNameInFieldSelector(t *testing.T) { ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("ns%d", i), t) defer framework.DeleteNamespaceOrDie(clientSet, ns, t) - _, err := clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("foo"), metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets(ns.Name).Create(ctx, makeSecret("foo"), metav1.CreateOptions{}) if err != nil { t.Errorf("Couldn't create secret: %v", err) } - _, err = clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("bar"), metav1.CreateOptions{}) + _, err = clientSet.CoreV1().Secrets(ns.Name).Create(ctx, makeSecret("bar"), metav1.CreateOptions{}) if err != nil { t.Errorf("Couldn't create secret: %v", err) } @@ -865,7 +886,7 @@ func TestNameInFieldSelector(t *testing.T) { opts := metav1.ListOptions{ FieldSelector: tc.selector, } - secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(context.TODO(), opts) + secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(ctx, opts) if err != nil { t.Errorf("%s: Unexpected error: %v", tc.selector, err) } 
@@ -897,7 +918,7 @@ func TestMetadataClient(t *testing.T) { } defer tearDown() - clientset, kubeConfig, tearDownFn := setup(t) + ctx, clientset, kubeConfig, tearDownFn := setup(t) defer tearDownFn() apiExtensionClient, err := apiextensionsclient.NewForConfig(config) @@ -951,7 +972,7 @@ func TestMetadataClient(t *testing.T) { namespace := framework.CreateNamespaceOrDie(clientset, ns, t) defer framework.DeleteNamespaceOrDie(clientset, namespace, t) - svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(ns).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } @@ -964,7 +985,7 @@ func TestMetadataClient(t *testing.T) { }) client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services")) - items, err := client.Namespace(ns).List(context.TODO(), metav1.ListOptions{}) + items, err := client.Namespace(ns).List(ctx, metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -983,7 +1004,7 @@ func TestMetadataClient(t *testing.T) { } wrapper.resp = nil - item, err := client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}) + item, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -994,7 +1015,7 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected response: %#v", wrapper.resp) } - item, err = client.Namespace(ns).Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) + item, err = client.Namespace(ns).Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) if err != nil { t.Fatal(err) } @@ -1002,11 +1023,11 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected object: %#v", item) } - if err := client.Namespace(ns).Delete(context.TODO(), "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { + if err := client.Namespace(ns).Delete(ctx, "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { t.Fatal(err) } - if _, err := client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + if _, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { t.Fatal(err) } }, @@ -1016,7 +1037,7 @@ func TestMetadataClient(t *testing.T) { want: func(t *testing.T) { ns := "metadata-crd" crclient := dynamicClient.Resource(crdGVR).Namespace(ns) - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{ + cr, err := crclient.Create(ctx, &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "cr.bar.com/v1", "kind": "Foo", @@ -1041,7 +1062,7 @@ func TestMetadataClient(t *testing.T) { }) client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR) - items, err := client.Namespace(ns).List(context.TODO(), metav1.ListOptions{}) + items, err := client.Namespace(ns).List(ctx, metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -1060,7 +1081,7 @@ func TestMetadataClient(t *testing.T) { } wrapper.resp = nil - item, err := 
client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}) + item, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -1071,7 +1092,7 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected response: %#v", wrapper.resp) } - item, err = client.Namespace(ns).Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) + item, err = client.Namespace(ns).Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) if err != nil { t.Fatal(err) } @@ -1079,10 +1100,10 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected object: %#v", item) } - if err := client.Namespace(ns).Delete(context.TODO(), "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { + if err := client.Namespace(ns).Delete(ctx, "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { t.Fatal(err) } - if _, err := client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + if _, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { t.Fatal(err) } }, @@ -1094,11 +1115,11 @@ func TestMetadataClient(t *testing.T) { namespace := framework.CreateNamespaceOrDie(clientset, ns, t) defer framework.DeleteNamespaceOrDie(clientset, namespace, t) - svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(ns).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(ns).Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(ns).Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } @@ -1110,7 +1131,7 @@ func TestMetadataClient(t *testing.T) { }) client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services")) - w, err := client.Namespace(ns).Watch(context.TODO(), metav1.ListOptions{ResourceVersion: svc.ResourceVersion, Watch: true}) + w, err := client.Namespace(ns).Watch(ctx, metav1.ListOptions{ResourceVersion: svc.ResourceVersion, Watch: true}) if err != nil { t.Fatal(err) } @@ -1147,7 +1168,7 @@ func TestMetadataClient(t *testing.T) { want: func(t *testing.T) { ns := "metadata-watch-crd" crclient := dynamicClient.Resource(crdGVR).Namespace(ns) - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{ + cr, err := crclient.Create(ctx, &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "cr.bar.com/v1", "kind": "Foo", @@ -1167,7 +1188,7 @@ func TestMetadataClient(t *testing.T) { cfg := metadata.ConfigFor(config) client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR) - patched, err := client.Namespace(ns).Patch(context.TODO(), "test-2", types.MergePatchType, 
[]byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}) + patched, err := client.Namespace(ns).Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}) if err != nil { t.Fatal(err) } @@ -1182,7 +1203,7 @@ func TestMetadataClient(t *testing.T) { }) client = metadata.NewForConfigOrDie(cfg).Resource(crdGVR) - w, err := client.Namespace(ns).Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cr.GetResourceVersion(), Watch: true}) + w, err := client.Namespace(ns).Watch(ctx, metav1.ListOptions{ResourceVersion: cr.GetResourceVersion(), Watch: true}) if err != nil { t.Fatal(err) } @@ -1231,7 +1252,7 @@ func TestAPICRDProtobuf(t *testing.T) { } defer tearDown() - _, kubeConfig, tearDownFn := setup(t) + ctx, _, kubeConfig, tearDownFn := setup(t) defer tearDownFn() apiExtensionClient, err := apiextensionsclient.NewForConfig(config) @@ -1285,11 +1306,11 @@ func TestAPICRDProtobuf(t *testing.T) { name: "server returns 406 when asking for protobuf for CRDs, which dynamic client does not support", accept: "application/vnd.kubernetes.protobuf", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1314,11 +1335,11 @@ func TestAPICRDProtobuf(t *testing.T) { name: "server returns JSON when asking for protobuf and json for CRDs", accept: "application/vnd.kubernetes.protobuf,application/json", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1343,11 +1364,11 @@ func TestAPICRDProtobuf(t *testing.T) { accept: "application/vnd.kubernetes.protobuf", subresource: "status", object: func(t *testing.T) (metav1.Object, string, string) { - cr, 
err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"3"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"3"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1373,11 +1394,11 @@ func TestAPICRDProtobuf(t *testing.T) { accept: "application/vnd.kubernetes.protobuf,application/json", subresource: "status", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-4"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-4"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"4"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"4"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1420,7 +1441,7 @@ func TestAPICRDProtobuf(t *testing.T) { w, err := client.Get(). Resource(resource).NamespaceIfScoped(obj.GetNamespace(), len(obj.GetNamespace()) > 0).Name(obj.GetName()).SubResource(tc.subresource). SetHeader("Accept", tc.accept). 
- Stream(context.TODO()) + Stream(ctx) if (tc.wantErr != nil) != (err != nil) { t.Fatalf("unexpected error: %v", err) } @@ -1445,7 +1466,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { } defer tearDown() - clientset, kubeConfig, tearDownFn := setup(t) + ctx, clientset, kubeConfig, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientset, testNamespace, t) @@ -1529,7 +1550,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { name: "v1 verify status subresource returns a table for CRDs", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := subresourcesCrclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-1"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) + cr, err := subresourcesCrclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-1"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } @@ -1541,7 +1562,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { name: "v1 verify scale subresource returns a table for CRDs", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := subresourcesCrclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-2"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) + cr, err := subresourcesCrclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-2"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } @@ -1576,7 +1597,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { }, }, } - rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(ctx, rc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create replicationcontroller: %v", err) } @@ -1611,7 +1632,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { }, }, } - rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(ctx, rc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create replicationcontroller: %v", err) } @@ -1645,7 +1666,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { SetHeader("Accept", tc.accept). Name(obj.GetName()). SubResource(tc.subresource). 
- Do(context.TODO()) + Do(ctx) resObj, err := res.Get() if err != nil { @@ -1667,7 +1688,7 @@ func TestTransform(t *testing.T) { } defer tearDown() - clientset, kubeConfig, tearDownFn := setup(t) + ctx, clientset, kubeConfig, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientset, testNamespace, t) @@ -1711,7 +1732,7 @@ func TestTransform(t *testing.T) { crdGVR := schema.GroupVersionResource{Group: fooCRD.Spec.Group, Version: fooCRD.Spec.Versions[0].Name, Resource: "foos"} crclient := dynamicClient.Resource(crdGVR).Namespace(testNamespace) - previousList, err := crclient.List(context.TODO(), metav1.ListOptions{}) + previousList, err := crclient.List(ctx, metav1.ListOptions{}) if err != nil { t.Fatalf("failed to list CRs before test: %v", err) } @@ -1739,11 +1760,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on CRDs in json", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1756,11 +1777,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on CRDs in json;stream=watch", accept: "application/json;stream=watch;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1773,11 +1794,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on CRDs in yaml", accept: "application/yaml;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) + cr, err := 
crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1796,11 +1817,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -1814,11 +1835,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1832,11 +1853,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + obj, err := 
clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1856,11 +1877,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1873,11 +1894,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1890,11 +1911,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on CRDs in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: 
map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-4", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-4", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1995,11 +2016,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on CRDs in json", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-5"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-5"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-5", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-5", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2012,11 +2033,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on CRDs in json;stream=watch", accept: "application/json;stream=watch;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-6"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-6"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-6", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-6", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2029,11 +2050,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on CRDs in yaml", accept: "application/yaml;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": 
"Foo", "metadata": map[string]interface{}{"name": "test-7"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-7"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-7", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-7", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2052,11 +2073,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -2070,11 +2091,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -2088,11 +2109,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, 
metav1.CreateOptions{}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -2112,11 +2133,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -2129,11 +2150,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -2146,11 +2167,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on CRDs in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: 
map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-8", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-8", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), cr.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, cr.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2266,9 +2287,9 @@ func TestTransform(t *testing.T) { rv = previousRV } - ctx, cancel := context.WithTimeout(context.Background(), wait.ForeverTestTimeout) + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, wait.ForeverTestTimeout) t.Cleanup(func() { - cancel() + timeoutCancel() }) w, err := client.Get(). Resource(resource).NamespaceIfScoped(obj.GetNamespace(), len(obj.GetNamespace()) > 0). @@ -2279,7 +2300,7 @@ func TestTransform(t *testing.T) { FieldSelector: fields.OneTermEqualSelector("metadata.name", obj.GetName()).String(), }, metav1.ParameterCodec). Param("includeObject", string(tc.includeObject)). - Stream(ctx) + Stream(timeoutCtx) if (tc.wantErr != nil) != (err != nil) { t.Fatalf("unexpected error: %v", err) } diff --git a/test/integration/apiserver/certreload/certreload_test.go b/test/integration/apiserver/certreload/certreload_test.go index 7559c4dfab0..d1bf7189da5 100644 --- a/test/integration/apiserver/certreload/certreload_test.go +++ b/test/integration/apiserver/certreload/certreload_test.go @@ -43,6 +43,7 @@ import ( "k8s.io/component-base/cli/flag" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) type caWithClient struct { @@ -135,6 +136,10 @@ func TestClientCARecreate(t *testing.T) { } func testClientCA(t *testing.T, recreate bool) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + frontProxyCA, err := newTestCAWithClient( pkix.Name{ CommonName: "test-front-proxy-ca", @@ -170,7 +175,7 @@ func testClientCA(t *testing.T, recreate bool) { clientCAFilename := "" frontProxyCAFilename := "" - kubeClient, kubeconfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 clientCAFilename = opts.Authentication.ClientCert.ClientCA @@ -300,7 +305,7 @@ func testClientCA(t *testing.T, recreate bool) { } // Call an endpoint to make sure we are authenticated - _, err = testClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + _, err = testClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { t.Error(err) } @@ -468,9 +473,13 @@ func TestServingCertRecreate(t *testing.T) { } func testServingCert(t *testing.T, recreate bool) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var servingCertPath string - _, 
kubeconfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 servingCertPath = opts.SecureServing.ServerCert.CertDirectory @@ -509,7 +518,11 @@ func testServingCert(t *testing.T, recreate bool) { func TestSNICert(t *testing.T) { var servingCertPath string - _, kubeconfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + _, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 servingCertPath = opts.SecureServing.ServerCert.CertDirectory diff --git a/test/integration/apiserver/export_test.go b/test/integration/apiserver/export_test.go index a2ecbea8d2f..b80bc44f4c4 100644 --- a/test/integration/apiserver/export_test.go +++ b/test/integration/apiserver/export_test.go @@ -17,7 +17,6 @@ limitations under the License. package apiserver import ( - "context" "net/http" "testing" @@ -27,31 +26,32 @@ import ( // Tests that the apiserver rejects the export param func TestExportRejection(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() - _, err := clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ + _, err := clientSet.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "export-fail"}, }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } defer func() { - clientSet.CoreV1().Namespaces().Delete(context.Background(), "export-fail", metav1.DeleteOptions{}) + if err := clientSet.CoreV1().Namespaces().Delete(ctx, "export-fail", metav1.DeleteOptions{}); err != nil { + t.Errorf("error while deleting the namespace: %v", err) + } }() - result := clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "true").Do(context.Background()) + result := clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "true").Do(ctx) statusCode := 0 result.StatusCode(&statusCode) if statusCode != http.StatusBadRequest { t.Errorf("expected %v, got %v", http.StatusBadRequest, statusCode) } - result = clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "false").Do(context.Background()) + result = clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "false").Do(ctx) statusCode = 0 result.StatusCode(&statusCode) if statusCode != http.StatusOK { t.Errorf("expected %v, got %v", http.StatusOK, statusCode) } - } diff --git a/test/integration/apiserver/flowcontrol/concurrency_test.go b/test/integration/apiserver/flowcontrol/concurrency_test.go index a090c7528f5..cc39ef94f82 100644 --- a/test/integration/apiserver/flowcontrol/concurrency_test.go +++ b/test/integration/apiserver/flowcontrol/concurrency_test.go @@ -38,6 +38,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -48,22 +49,30 @@ const ( timeout = time.Second * 10 ) -func
setup(t testing.TB, maxReadonlyRequestsInFlight, MaxMutatingRequestsInFlight int) (*rest.Config, framework.TearDownFunc) { - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ +func setup(t testing.TB, maxReadonlyRequestsInFlight, maxMutatingRequestsInFlight int) (context.Context, *rest.Config, framework.TearDownFunc) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure all clients are allowed to send requests. opts.Authorization.Modes = []string{"AlwaysAllow"} opts.GenericServerRunOptions.MaxRequestsInFlight = maxReadonlyRequestsInFlight - opts.GenericServerRunOptions.MaxMutatingRequestsInFlight = MaxMutatingRequestsInFlight + opts.GenericServerRunOptions.MaxMutatingRequestsInFlight = maxMutatingRequestsInFlight }, }) - return kubeConfig, tearDownFn + + newTeardown := func() { + cancel() + tearDownFn() + } + return ctx, kubeConfig, newTeardown } func TestPriorityLevelIsolation(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() // NOTE: disabling the feature should fail the test - kubeConfig, closeFn := setup(t, 1, 1) + ctx, kubeConfig, closeFn := setup(t, 1, 1) defer closeFn() loopbackClient := clientset.NewForConfigOrDie(kubeConfig) @@ -106,7 +115,7 @@ func TestPriorityLevelIsolation(t *testing.T) { // "elephant" wg.Add(concurrencyShares + queueLength) streamRequests(concurrencyShares+queueLength, func() { - _, err := noxu1Client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) + _, err := noxu1Client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { t.Error(err) } @@ -114,7 +123,7 @@ func TestPriorityLevelIsolation(t *testing.T) { // "mouse" wg.Add(3) streamRequests(3, func() { - _, err := noxu2Client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) + _, err := noxu2Client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { t.Error(err) } diff --git a/test/integration/apiserver/flowcontrol/concurrency_util_test.go b/test/integration/apiserver/flowcontrol/concurrency_util_test.go index e5b90bae99f..ba5c88f31bf 100644 --- a/test/integration/apiserver/flowcontrol/concurrency_util_test.go +++ b/test/integration/apiserver/flowcontrol/concurrency_util_test.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -147,10 +148,14 @@ func (d *noxuDelayingAuthorizer) Authorize(ctx context.Context, a authorizer.Att // Secondarily, this test also checks the observed seat utilizations against values derived from expecting that // the throughput observed by the client equals the execution throughput observed by the server. 
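A minimal sketch of the setup shape introduced above for the flowcontrol tests: derive a per-test context from ktesting, make it cancellable, start the server with it, and fold cancel() into the single teardown handed back to callers. startServer below is a hypothetical stand-in for framework.StartTestServer(ctx, t, ...), kept trivial so the sketch compiles on its own.

package sketch

import (
	"context"
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// startServer is a hypothetical stand-in for framework.StartTestServer(ctx, t, ...);
// only the context-in, teardown-out shape matters for this sketch.
func startServer(ctx context.Context, t testing.TB) (teardown func()) {
	return func() {}
}

// setupWithTestContext mirrors the rewritten setup helper: the returned teardown
// cancels the context first, then stops the server.
func setupWithTestContext(t testing.TB) (context.Context, func()) {
	_, ctx := ktesting.NewTestContext(t)   // per-test logger and context
	ctx, cancel := context.WithCancel(ctx) // cancelled by the teardown below

	stopServer := startServer(ctx, t)

	return ctx, func() {
		cancel()
		stopServer()
	}
}

Cancelling before stopping the server lets anything still using ctx (client calls, informers) unwind before the server itself is torn down, which appears to be the intent of the newTeardown closures in this patch.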
func TestConcurrencyIsolation(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() // NOTE: disabling the feature should fail the test - _, kubeConfig, closeFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, closeFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure all clients are allowed to send requests. opts.Authorization.Modes = []string{"AlwaysAllow"} @@ -191,7 +196,7 @@ func TestConcurrencyIsolation(t *testing.T) { wg.Add(noxu1NumGoroutines) streamRequests(noxu1NumGoroutines, func() { start := time.Now() - _, err := noxu1Client.CoreV1().Namespaces().Get(context.Background(), "default", metav1.GetOptions{}) + _, err := noxu1Client.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{}) duration := time.Since(start).Seconds() noxu1LatMeasure.update(duration) if err != nil { @@ -204,7 +209,7 @@ func TestConcurrencyIsolation(t *testing.T) { wg.Add(noxu2NumGoroutines) streamRequests(noxu2NumGoroutines, func() { start := time.Now() - _, err := noxu2Client.CoreV1().Namespaces().Get(context.Background(), "default", metav1.GetOptions{}) + _, err := noxu2Client.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{}) duration := time.Since(start).Seconds() noxu2LatMeasure.update(duration) if err != nil { diff --git a/test/integration/apiserver/flowcontrol/fight_test.go b/test/integration/apiserver/flowcontrol/fight_test.go index 2279d597c52..e80be152fb2 100644 --- a/test/integration/apiserver/flowcontrol/fight_test.go +++ b/test/integration/apiserver/flowcontrol/fight_test.go @@ -172,7 +172,7 @@ func (ft *fightTest) evaluate(tBeforeCreate, tAfterCreate time.Time) { } func TestConfigConsumerFight(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() - kubeConfig, closeFn := setup(t, 100, 100) + _, kubeConfig, closeFn := setup(t, 100, 100) defer closeFn() const teamSize = 3 ft := newFightTest(t, kubeConfig, teamSize) diff --git a/test/integration/apiserver/flowcontrol/fs_condition_test.go b/test/integration/apiserver/flowcontrol/fs_condition_test.go index 823466be067..e91c382769d 100644 --- a/test/integration/apiserver/flowcontrol/fs_condition_test.go +++ b/test/integration/apiserver/flowcontrol/fs_condition_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package flowcontrol import ( - "context" "encoding/json" "testing" "time" @@ -38,15 +37,11 @@ import ( func TestConditionIsolation(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() // NOTE: disabling the feature should fail the test - kubeConfig, closeFn := setup(t, 10, 10) + ctx, kubeConfig, closeFn := setup(t, 10, 10) defer closeFn() loopbackClient := clientset.NewForConfigOrDie(kubeConfig) - stopCh := make(chan struct{}) - defer close(stopCh) - ctx := context.Background() - fsOrig := fcboot.SuggestedFlowSchemas[0] t.Logf("Testing Status Condition isolation in FlowSchema %q", fsOrig.Name) fsClient := loopbackClient.FlowcontrolV1beta3().FlowSchemas() @@ -60,7 +55,7 @@ func TestConditionIsolation(t *testing.T) { } dangleOrig = getCondition(fsGot.Status.Conditions, flowcontrol.FlowSchemaConditionDangling) return dangleOrig != nil, nil - }, stopCh) + }, ctx.Done()) ssaType := flowcontrol.FlowSchemaConditionType("test-ssa") patchSSA := flowcontrolapply.FlowSchema(fsOrig.Name). diff --git a/test/integration/apiserver/max_json_patch_operations_test.go b/test/integration/apiserver/max_json_patch_operations_test.go index 357d7092955..c1c455851da 100644 --- a/test/integration/apiserver/max_json_patch_operations_test.go +++ b/test/integration/apiserver/max_json_patch_operations_test.go @@ -28,11 +28,16 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // Tests that the apiserver limits the number of operations in a json patch. func TestMaxJSONPatchOperations(t *testing.T) { - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 }, @@ -50,13 +55,13 @@ func TestMaxJSONPatchOperations(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets("default").Create(ctx, secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } err = c.Patch(types.JSONPatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). - Body(hugePatch).Do(context.TODO()).Error() + Body(hugePatch).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } diff --git a/test/integration/apiserver/max_request_body_bytes_test.go b/test/integration/apiserver/max_request_body_bytes_test.go index 9b493058b12..6f10c1ed8f1 100644 --- a/test/integration/apiserver/max_request_body_bytes_test.go +++ b/test/integration/apiserver/max_request_body_bytes_test.go @@ -26,11 +26,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // Tests that the apiserver limits the resource size in write operations. 
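The fs_condition change above drops the hand-rolled stopCh entirely: the cancellable test context already provides an equivalent Done() channel for the polling helper, and the same ctx replaces context.TODO()/context.Background() in client calls. A small self-contained sketch of that substitution; wait.PollImmediateUntil is used only as an illustrative polling helper, and the condition body is a placeholder rather than the real FlowSchema status check.

package sketch

import (
	"context"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestPollUsesContextDone(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // replaces `defer close(stopCh)`

	// ctx.Done() plays the role of the removed stopCh: polling stops as soon as
	// the test context is cancelled.
	err := wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
		return true, nil // placeholder condition
	}, ctx.Done())
	if err != nil {
		t.Fatal(err)
	}
}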
func TestMaxResourceSize(t *testing.T) { - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() hugeData := []byte(strings.Repeat("x", 3*1024*1024+1)) @@ -40,7 +45,7 @@ func TestMaxResourceSize(t *testing.T) { c := clientSet.CoreV1().RESTClient() t.Run("Create should limit the request body size", func(t *testing.T) { err := c.Post().AbsPath("/api/v1/namespaces/default/pods"). - Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -56,14 +61,14 @@ func TestMaxResourceSize(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets("default").Create(ctx, secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } t.Run("Update should limit the request body size", func(t *testing.T) { err = c.Put().AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -74,7 +79,7 @@ func TestMaxResourceSize(t *testing.T) { }) t.Run("Patch should limit the request body size", func(t *testing.T) { err = c.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -89,7 +94,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`[{"op":"add","path":"/foo","value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}]`) err = rest.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } @@ -100,7 +105,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`[{"op":"add","path":"/foo","value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}]`) err = rest.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } @@ -111,7 +116,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.MergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } @@ -122,7 +127,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`) err = rest.Patch(types.MergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). 
- Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } @@ -133,7 +138,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.StrategicMergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } @@ -144,7 +149,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`) err = rest.Patch(types.StrategicMergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } @@ -155,7 +160,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %#v", err) } @@ -166,14 +171,14 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"apiVersion":"v1","kind":"Secret"` + strings.Repeat(" ", 3*1024*1024-100) + `}`) err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } }) t.Run("Delete should limit the request body size", func(t *testing.T) { err = c.Delete().AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -197,7 +202,7 @@ values: ` + strings.Repeat("[", 3*1024*1024)) SetHeader("Content-Type", "application/yaml"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected too large error, got %v", err) } @@ -220,7 +225,7 @@ values: ` + strings.Repeat("[", 3*1024*1024/2-500) + strings.Repeat("]", 3*1024* SetHeader("Content-Type", "application/yaml"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) } @@ -243,7 +248,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/yaml"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) } @@ -264,7 +269,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/json"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(jsonBody). 
- DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected too large error, got %v", err) } @@ -288,7 +293,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/json"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(jsonBody). - DoRaw(context.TODO()) + DoRaw(ctx) // TODO(liggitt): expect bad request on deep nesting, rather than success on dropped unknown field data if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) @@ -313,7 +318,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/json"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(jsonBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) } diff --git a/test/integration/apiserver/no_new_betas_test.go b/test/integration/apiserver/no_new_betas_test.go index fc7fc9706c9..20e21c2e5a1 100644 --- a/test/integration/apiserver/no_new_betas_test.go +++ b/test/integration/apiserver/no_new_betas_test.go @@ -57,7 +57,7 @@ func TestNoNewBetaAPIsByDefault(t *testing.T) { // if you found this because you want to create an integration test for your new beta API, the method you're looking for // is this setupWithResources method and you need to pass the resource you want to enable into it. - kubeClient, _, tearDownFn := setupWithResources(t, + _, kubeClient, _, tearDownFn := setupWithResources(t, []schema.GroupVersion{}, []schema.GroupVersionResource{}, ) diff --git a/test/integration/apiserver/openapi/openapi_enum_test.go b/test/integration/apiserver/openapi/openapi_enum_test.go index a0a1050ab02..1931f801b20 100644 --- a/test/integration/apiserver/openapi/openapi_enum_test.go +++ b/test/integration/apiserver/openapi/openapi_enum_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package openapi import ( + "context" "encoding/json" "net/http" "testing" @@ -31,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" generated "k8s.io/kubernetes/pkg/generated/openapi" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestEnablingOpenAPIEnumTypes(t *testing.T) { @@ -54,6 +56,10 @@ func TestEnablingOpenAPIEnumTypes(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.OpenAPIEnums, tc.featureEnabled)() getDefinitionsFn := openapi.GetOpenAPIDefinitionsWithoutDisabledFeatures(func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { @@ -73,7 +79,7 @@ func TestEnablingOpenAPIEnumTypes(t *testing.T) { return defs }) - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { config.GenericConfig.OpenAPIConfig = framework.DefaultOpenAPIConfig() config.GenericConfig.OpenAPIConfig.GetDefinitions = getDefinitionsFn diff --git a/test/integration/apiserver/openapi/openapiv3_test.go b/test/integration/apiserver/openapi/openapiv3_test.go index 6683e720dbb..1f0369e48d9 100644 --- a/test/integration/apiserver/openapi/openapiv3_test.go +++ b/test/integration/apiserver/openapi/openapiv3_test.go @@ -41,13 +41,18 @@ import ( "k8s.io/kube-openapi/pkg/spec3" apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" "sigs.k8s.io/yaml" ) func TestOpenAPIV3SpecRoundTrip(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.OpenAPIV3, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() paths := []string{ @@ -189,9 +194,14 @@ func TestOpenAPIV3ProtoRoundtrip(t *testing.T) { // The OpenAPI V3 proto library strips fields that are sibling elements to $ref // See https://github.com/kubernetes/kubernetes/issues/106387 for more details t.Skip("Skipping OpenAPI V3 Proto roundtrip test") + + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.OpenAPIV3, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() rt, err := restclient.TransportFor(kubeConfig) diff --git a/test/integration/apiserver/patch_test.go b/test/integration/apiserver/patch_test.go index db4077e66aa..a4a8f64db7b 100644 --- a/test/integration/apiserver/patch_test.go +++ b/test/integration/apiserver/patch_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package apiserver import ( - "context" "fmt" "sync" "sync/atomic" @@ -37,7 +36,7 @@ import ( // Tests that the apiserver retries patches func TestPatchConflicts(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientSet, "status-code", t) @@ -66,7 +65,7 @@ func TestPatchConflicts(t *testing.T) { } // Create the object we're going to conflict on - _, err := clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets(ns.Name).Create(ctx, secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -89,7 +88,7 @@ func TestPatchConflicts(t *testing.T) { Resource("secrets"). Name("test"). Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "ownerReferences":[{"$patch":"delete","uid":"%s"}]}}`, labelName, value, UIDs[i]))). - Do(context.TODO()). + Do(ctx). Get() if apierrors.IsConflict(err) { @@ -143,7 +142,7 @@ func findOwnerRefByUID(ownerRefs []metav1.OwnerReference, uid types.UID) bool { // with an empty slice is handled property // https://github.com/kubernetes/kubernetes/issues/117470 func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() url := "https://foo.com" @@ -153,7 +152,7 @@ func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { AdmissionregistrationV1(). ValidatingWebhookConfigurations(). Create( - context.TODO(), + ctx, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: "base-validation", @@ -178,7 +177,7 @@ func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { AdmissionregistrationV1(). ValidatingWebhookConfigurations(). Patch( - context.TODO(), + ctx, "base-validation", types.StrategicMergePatchType, []byte(` @@ -198,7 +197,7 @@ func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { AdmissionregistrationV1(). ValidatingWebhookConfigurations(). 
Patch( - context.TODO(), + ctx, "base-validation", types.StrategicMergePatchType, []byte(`{"$setElementOrder/webhooks":[{"name":"new.foo.com"}],"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"admissionregistration.k8s.io/v1\",\"kind\":\"ValidatingWebhookConfiguration\",\"metadata\":{\"annotations\":{},\"name\":\"base-validation\"},\"webhooks\":[{\"admissionReviewVersions\":[\"v1\"],\"clientConfig\":{\"url\":\"https://foo.com\"},\"name\":\"new.foo.com\",\"sideEffects\":\"None\"}]}\n"}},"webhooks":[{"admissionReviewVersions":["v1"],"clientConfig":{"url":"https://foo.com"},"name":"new.foo.com","sideEffects":"None"},{"$patch":"delete","name":"foo.bar.com"}]}`), diff --git a/test/integration/apiserver/podlogs/podlogs_test.go b/test/integration/apiserver/podlogs/podlogs_test.go index a9c6189ab38..1d2ad10061e 100644 --- a/test/integration/apiserver/podlogs/podlogs_test.go +++ b/test/integration/apiserver/podlogs/podlogs_test.go @@ -51,6 +51,7 @@ import ( "k8s.io/client-go/util/keyutil" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestInsecurePodLogs(t *testing.T) { @@ -77,7 +78,11 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq -----END CERTIFICATE----- `)) - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 // I have no idea what this cert is, but it doesn't matter, we just want something that always fails validation @@ -92,7 +97,7 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq })) defer fakeKubeletServer.Close() - pod := prepareFakeNodeAndPod(context.TODO(), t, clientSet, fakeKubeletServer) + pod := prepareFakeNodeAndPod(ctx, t, clientSet, fakeKubeletServer) insecureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{InsecureSkipTLSVerifyBackend: true}).Do(context.TODO()) if err := insecureResult.Error(); err != nil { @@ -104,7 +109,7 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq t.Fatal(insecureStatusCode) } - secureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(context.TODO()) + secureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(ctx) if err := secureResult.Error(); err == nil || !strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { t.Fatal(err) } @@ -250,7 +255,7 @@ func TestPodLogsKubeletClientCertReload(t *testing.T) { Bytes: fakeKubeletServer.Certificate().Raw, })) - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 opts.KubeletConfig.TLSClientConfig.CAFile = kubeletCA diff --git a/test/integration/apiserver/print_test.go b/test/integration/apiserver/print_test.go index 744e75ffae9..ed4a1405402 100644 --- a/test/integration/apiserver/print_test.go +++ b/test/integration/apiserver/print_test.go @@ -120,7 +120,7 @@ var missingHanlders = sets.NewString( ) func 
TestServerSidePrint(t *testing.T) { - clientSet, kubeConfig, tearDownFn := setupWithResources(t, + _, clientSet, kubeConfig, tearDownFn := setupWithResources(t, // additional groupversions needed for the test to run []schema.GroupVersion{ {Group: "discovery.k8s.io", Version: "v1"}, diff --git a/test/integration/apiserver/watchcache_test.go b/test/integration/apiserver/watchcache_test.go index ae0b1aa6ec5..4b7c1c56845 100644 --- a/test/integration/apiserver/watchcache_test.go +++ b/test/integration/apiserver/watchcache_test.go @@ -31,11 +31,12 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/controlplane/reconcilers" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // setup create kube-apiserver backed up by two separate etcds, // with one of them containing events and the other all other objects. -func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) { +func multiEtcdSetup(ctx context.Context, t *testing.T) (clientset.Interface, framework.TearDownFunc) { etcdArgs := []string{"--experimental-watch-progress-notify-interval", "1s"} etcd0URL, stopEtcd0, err := framework.RunCustomEtcd("etcd_watchcache0", etcdArgs, nil) if err != nil { @@ -53,7 +54,7 @@ func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) etcdOptions.EtcdServersOverrides = []string{fmt.Sprintf("/events#%s", etcd1URL)} etcdOptions.EnableWatchCache = true - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure we're using the same etcd across apiserver restarts. opts.Etcd = etcdOptions @@ -74,7 +75,6 @@ func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) // Everything but default service creation is checked in StartTestServer above by // waiting for post start hooks, so we just wait for default service to exist. // TODO(wojtek-t): Figure out less fragile way. - ctx := context.Background() if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { _, err := clientSet.CoreV1().Services("default").Get(ctx, "kubernetes", metav1.GetOptions{}) return err == nil, nil @@ -85,10 +85,12 @@ func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) } func TestWatchCacheUpdatedByEtcd(t *testing.T) { - c, closeFn := multiEtcdSetup(t) - defer closeFn() + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() - ctx := context.Background() + c, closeFn := multiEtcdSetup(ctx, t) + defer closeFn() makeConfigMap := func(name string) *v1.ConfigMap { return &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}} @@ -167,7 +169,11 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) { } func BenchmarkListFromWatchCache(b *testing.B) { - c, _, tearDownFn := framework.StartTestServer(b, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(b) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, _, tearDownFn := framework.StartTestServer(ctx, b, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Switch off endpoints reconciler to avoid unnecessary operations. 
config.ExtraConfig.EndpointReconcilerType = reconcilers.NoneEndpointReconcilerType @@ -185,7 +191,6 @@ func BenchmarkListFromWatchCache(b *testing.B) { go func() { defer wg.Done() - ctx := context.Background() ns := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("namespace-%d", index)}, } @@ -218,7 +223,6 @@ func BenchmarkListFromWatchCache(b *testing.B) { b.ResetTimer() - ctx := context.Background() opts := metav1.ListOptions{ ResourceVersion: "0", } diff --git a/test/integration/auth/accessreview_test.go b/test/integration/auth/accessreview_test.go index 1272bcedf01..647a8c07793 100644 --- a/test/integration/auth/accessreview_test.go +++ b/test/integration/auth/accessreview_test.go @@ -32,6 +32,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // Inject into control plane an authorizer that uses user info. @@ -57,7 +58,11 @@ func alwaysAlice(req *http.Request) (*authenticator.Response, bool, error) { } func TestSubjectAccessReview(t *testing.T) { - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Unset BearerToken to disable BearerToken authenticator. config.GenericConfig.LoopbackClientConfig.BearerToken = "" @@ -127,7 +132,7 @@ func TestSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), test.sar, metav1.CreateOptions{}) + response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(ctx, test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: @@ -149,7 +154,12 @@ func TestSubjectAccessReview(t *testing.T) { } func TestSelfSubjectAccessReview(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var mutex sync.Mutex + username := "alice" authenticatorFunc := func(req *http.Request) (*authenticator.Response, bool, error) { mutex.Lock() @@ -164,7 +174,7 @@ func TestSelfSubjectAccessReview(t *testing.T) { }, true, nil } - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Unset BearerToken to disable BearerToken authenticator. 
config.GenericConfig.LoopbackClientConfig.BearerToken = "" @@ -225,7 +235,7 @@ func TestSelfSubjectAccessReview(t *testing.T) { username = test.username mutex.Unlock() - response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), test.sar, metav1.CreateOptions{}) + response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: @@ -247,7 +257,11 @@ func TestSelfSubjectAccessReview(t *testing.T) { } func TestLocalSubjectAccessReview(t *testing.T) { - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Unset BearerToken to disable BearerToken authenticator. config.GenericConfig.LoopbackClientConfig.BearerToken = "" @@ -345,7 +359,7 @@ func TestLocalSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(context.TODO(), test.sar, metav1.CreateOptions{}) + response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(ctx, test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: diff --git a/test/integration/auth/auth_test.go b/test/integration/auth/auth_test.go index becba2d72e3..674f98dffd7 100644 --- a/test/integration/auth/auth_test.go +++ b/test/integration/auth/auth_test.go @@ -72,6 +72,7 @@ import ( "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/authutil" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -451,7 +452,11 @@ func getTestRequests(namespace string) []testRequest { // // TODO(etune): write a fuzz test of the REST API. func TestAuthModeAlwaysAllow(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -555,7 +560,11 @@ func getPreviousResourceVersionKey(url, id string) string { } func TestAuthModeAlwaysDeny(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -599,7 +608,11 @@ func TestAuthModeAlwaysDeny(t *testing.T) { // TestAliceNotForbiddenOrUnauthorized tests a user who is known to // the authentication system and authorized to do any actions. 
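The auth tests above all gain the same per-test boilerplate: a ktesting context wrapped in context.WithCancel, passed to framework.StartTestServer and then reused for every client call in place of context.TODO(). A minimal sketch of that flow; the fake clientset is only an assumption to keep the example runnable without a real test server.

package sketch

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestPerTestContext(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// The real tests get their client from framework.StartTestServer(ctx, t, ...);
	// a fake clientset keeps this sketch self-contained.
	client := fake.NewSimpleClientset()

	// Requests share the test context instead of context.TODO(), so in-flight
	// calls are cancelled as soon as the test finishes.
	if _, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}); err != nil {
		t.Fatal(err)
	}
}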
func TestAliceNotForbiddenOrUnauthorized(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -675,7 +688,11 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) { // the authentication system but not authorized to do any actions // should receive "Forbidden". func TestBobIsForbidden(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -724,7 +741,11 @@ func TestBobIsForbidden(t *testing.T) { // An authorization module is installed in this scenario for integration // test purposes, but requests aren't expected to reach it. func TestUnknownUserIsUnauthorized(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -796,7 +817,11 @@ func (impersonateAuthorizer) Authorize(ctx context.Context, a authorizer.Attribu } func TestImpersonateIsForbidden(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1098,9 +1123,13 @@ func (a *trackingAuthorizer) Authorize(ctx context.Context, attributes authorize // TestAuthorizationAttributeDetermination tests that authorization attributes are built correctly func TestAuthorizationAttributeDetermination(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + trackingAuthorizer := &trackingAuthorizer{} - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1170,7 +1199,11 @@ func TestAuthorizationAttributeDetermination(t *testing.T) { // TestNamespaceAuthorization tests that authorization can be controlled // by namespace. func TestNamespaceAuthorization(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1271,7 +1304,11 @@ func TestNamespaceAuthorization(t *testing.T) { // TestKindAuthorization tests that authorization can be controlled // by namespace. func TestKindAuthorization(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1354,7 +1391,11 @@ func TestKindAuthorization(t *testing.T) { // TestReadOnlyAuthorization tests that authorization can be controlled // by namespace. func TestReadOnlyAuthorization(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1418,6 +1459,10 @@ func TestWebhookTokenAuthenticatorCustomDial(t *testing.T) { } func testWebhookTokenAuthenticator(customDialer bool, t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + authServer := newTestWebhookTokenAuthServer() defer authServer.Close() var authenticator authenticator.Request @@ -1433,7 +1478,7 @@ func testWebhookTokenAuthenticator(customDialer bool, t *testing.T) { t.Fatalf("error starting webhook token authenticator server: %v", err) } - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} diff --git a/test/integration/auth/bootstraptoken_test.go b/test/integration/auth/bootstraptoken_test.go index b513d10dfe7..3ada62dc14d 100644 --- a/test/integration/auth/bootstraptoken_test.go +++ b/test/integration/auth/bootstraptoken_test.go @@ -18,6 +18,7 @@ package auth import ( "bytes" + "context" "fmt" "io" "net/http" @@ -36,6 +37,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) type bootstrapSecrets []*corev1.Secret @@ -119,9 +121,13 @@ func TestBootstrapTokenAuth(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + authenticator := group.NewAuthenticatedGroupAdder(bearertoken.New(bootstrap.NewTokenAuthenticator(bootstrapSecrets{test.secret}))) - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Authorization.Modes = []string{"AlwaysAllow"} }, diff --git a/test/integration/auth/dynamic_client_test.go b/test/integration/auth/dynamic_client_test.go index 1192e46e656..be862869d0b 100644 --- a/test/integration/auth/dynamic_client_test.go +++ b/test/integration/auth/dynamic_client_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/cmd/kube-apiserver/app/options" kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestDynamicClientBuilder(t *testing.T) { @@ -51,7 +52,11 @@ func TestDynamicClientBuilder(t *testing.T) { t.Fatalf("parse duration failed: %v", err) } - baseClient, baseConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + baseClient, baseConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceAccountSigningKeyFile = tmpfile.Name() opts.ServiceAccountTokenMaxExpiration = maxExpirationDuration @@ -95,7 +100,7 @@ func TestDynamicClientBuilder(t *testing.T) { // We want to trigger token rotation here by deleting service account // the dynamic client was using. 
- if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), saName, metav1.DeleteOptions{}); err != nil { + if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(ctx, saName, metav1.DeleteOptions{}); err != nil { t.Fatalf("delete service account %s failed: %v", saName, err) } time.Sleep(time.Second * 10) diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index fab5964c50d..bbe0d0d5a6d 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -59,6 +59,7 @@ import ( rolebindingstore "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage" "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func clientForToken(user string, rt http.RoundTripper) *http.Client { @@ -537,13 +538,18 @@ func TestRBAC(t *testing.T) { "user-with-no-permissions": {Name: "user-with-no-permissions"}, }))) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var tearDownAuthorizerFn func() defer func() { if tearDownAuthorizerFn != nil { tearDownAuthorizerFn() } }() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. // Also disable namespace lifecycle to workaroung the test limitation that first creates @@ -669,19 +675,22 @@ func TestRBAC(t *testing.T) { } func TestBootstrapping(t *testing.T) { - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Authorization.Modes = []string{"RBAC"} }, }) defer tearDownFn() - watcher, err := clientset.RbacV1().ClusterRoles().Watch(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) + watcher, err := clientset.RbacV1().ClusterRoles().Watch(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { t.Fatalf("unexpected error: %v", err) } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() + _, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) { if event.Type != watch.Added { return false, nil @@ -692,7 +701,7 @@ func TestBootstrapping(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - clusterRoles, err := clientset.RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + clusterRoles, err := clientset.RbacV1().ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -708,7 +717,7 @@ func TestBootstrapping(t *testing.T) { t.Errorf("missing cluster-admin: %v", clusterRoles) - healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthook/rbac/bootstrap-roles").DoRaw(context.TODO()) + healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthook/rbac/bootstrap-roles").DoRaw(ctx) if err != nil { t.Error(err) } @@ -727,7 +736,11 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { etcdConfig := framework.SharedEtcd() - client, _, tearDownFn := framework.StartTestServer(t, 
framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure we're using the same etcd across apiserver restarts. opts.Etcd.StorageConfig = *etcdConfig @@ -738,7 +751,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { // Modify the default RBAC discovery ClusterRoleBidnings to look more like the defaults that // existed prior to v1.14, but with user modifications. t.Logf("Modifying default `system:discovery` ClusterRoleBinding") - discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:discovery", metav1.GetOptions{}) + discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:discovery", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err) } @@ -751,21 +764,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { APIGroup: "rbac.authorization.k8s.io", }, } - if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), discRoleBinding, metav1.UpdateOptions{}); err != nil { + if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(ctx, discRoleBinding, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update `system:discovery` ClusterRoleBinding: %v", err) } t.Logf("Modifying default `system:basic-user` ClusterRoleBinding") - basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:basic-user", metav1.GetOptions{}) + basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:basic-user", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err) } basicUserRoleBinding.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "false" basicUserRoleBinding.Annotations["rbac-discovery-upgrade-test"] = "pass" - if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), basicUserRoleBinding, metav1.UpdateOptions{}); err != nil { + if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(ctx, basicUserRoleBinding, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update `system:basic-user` ClusterRoleBinding: %v", err) } t.Logf("Deleting default `system:public-info-viewer` ClusterRoleBinding") - if err = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "system:public-info-viewer", metav1.DeleteOptions{}); err != nil { + if err = client.RbacV1().ClusterRoleBindings().Delete(ctx, "system:public-info-viewer", metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete `system:public-info-viewer` ClusterRoleBinding: %v", err) } @@ -775,7 +788,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { // Check that upgraded API servers inherit `system:public-info-viewer` settings from // `system:discovery`, and respect auto-reconciliation annotations. - client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure we're using the same etcd across apiserver restarts. 
opts.Etcd.StorageConfig = *etcdConfig @@ -783,21 +796,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { }, }) - newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:discovery", metav1.GetOptions{}) + newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:discovery", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err) } if !reflect.DeepEqual(newDiscRoleBinding, discRoleBinding) { t.Errorf("`system:discovery` should have been unmodified. Wanted: %v, got %v", discRoleBinding, newDiscRoleBinding) } - newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:basic-user", metav1.GetOptions{}) + newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:basic-user", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err) } if !reflect.DeepEqual(newBasicUserRoleBinding, basicUserRoleBinding) { t.Errorf("`system:basic-user` should have been unmodified. Wanted: %v, got %v", basicUserRoleBinding, newBasicUserRoleBinding) } - publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:public-info-viewer", metav1.GetOptions{}) + publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:public-info-viewer", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:public-info-viewer` ClusterRoleBinding: %v", err) } diff --git a/test/integration/auth/selfsubjectreview_test.go b/test/integration/auth/selfsubjectreview_test.go index 5d81ba80347..ce4c16448c8 100644 --- a/test/integration/auth/selfsubjectreview_test.go +++ b/test/integration/auth/selfsubjectreview_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestGetsSelfAttributes(t *testing.T) { @@ -88,6 +89,10 @@ func TestGetsSelfAttributes(t *testing.T) { }, } + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APISelfSubjectReview, true)() var respMu sync.RWMutex @@ -95,7 +100,7 @@ func TestGetsSelfAttributes(t *testing.T) { Name: "stub", } - kubeClient, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1alpha1=true") opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1beta1=true") @@ -122,7 +127,7 @@ func TestGetsSelfAttributes(t *testing.T) { res, err := kubeClient.AuthenticationV1alpha1(). SelfSubjectReviews(). - Create(context.TODO(), &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -149,7 +154,7 @@ func TestGetsSelfAttributes(t *testing.T) { res2, err := kubeClient.AuthenticationV1beta1(). SelfSubjectReviews(). 
- Create(context.TODO(), &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -202,7 +207,6 @@ func TestGetsSelfAttributes(t *testing.T) { } }) } - } func TestGetsSelfAttributesError(t *testing.T) { @@ -211,7 +215,11 @@ func TestGetsSelfAttributesError(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APISelfSubjectReview, true)() - kubeClient, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1alpha1=true") opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1beta1=true") @@ -243,7 +251,7 @@ func TestGetsSelfAttributesError(t *testing.T) { _, err := kubeClient.AuthenticationV1alpha1(). SelfSubjectReviews(). - Create(context.TODO(), &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) if err == nil { t.Fatalf("expected error: %v, got nil", err) } @@ -259,7 +267,7 @@ func TestGetsSelfAttributesError(t *testing.T) { _, err := kubeClient.AuthenticationV1beta1(). SelfSubjectReviews(). - Create(context.TODO(), &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) if err == nil { t.Fatalf("expected error: %v, got nil", err) } diff --git a/test/integration/auth/svcaccttoken_test.go b/test/integration/auth/svcaccttoken_test.go index f31a1091000..912aba31979 100644 --- a/test/integration/auth/svcaccttoken_test.go +++ b/test/integration/auth/svcaccttoken_test.go @@ -49,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/serviceaccount" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -74,9 +75,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { var tokenGenerator serviceaccount.TokenGenerator + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Start the server var serverAddress string - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -163,7 +168,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { } warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -172,18 +177,18 @@ func TestServiceAccountTokenCreate(t *testing.T) { treqWithBadName := treq.DeepCopy() treqWithBadName.Name = "invalid-name" - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treqWithBadName, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "must match the service account name") { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treqWithBadName, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "must match the service account name") { t.Fatalf("expected err creating token with mismatched name but got: %#v", resp) } treqWithBadNamespace := treq.DeepCopy() treqWithBadNamespace.Namespace = "invalid-namespace" - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treqWithBadNamespace, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "does not match the namespace") { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treqWithBadNamespace, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "does not match the namespace") { t.Fatalf("expected err creating token with mismatched namespace but got: %#v, %v", resp, err) } warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -227,7 +232,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { } warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -235,7 +240,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to nonexistant pod but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -245,21 +250,21 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = pod.UID warningHandler.clear() - if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { 
t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to pod with wrong uid but got: %#v", resp) } warningHandler.assertEqual(t, nil) // no uid treq.Spec.BoundObjectRef.UID = noUID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -300,7 +305,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { } warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -308,7 +313,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to nonexistant secret but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -318,21 +323,21 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = secret.UID warningHandler.clear() - if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to secret with wrong uid but got: %#v", resp) } warningHandler.assertEqual(t, nil) // no uid treq.Spec.BoundObjectRef.UID = noUID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -368,7 +373,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { 
t.Fatalf("expected err but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -385,7 +390,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -436,7 +441,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = pod.UID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -492,7 +497,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = pod.UID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -534,7 +539,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -552,7 +557,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -582,7 +587,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = originalPod.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) @@ -625,7 +630,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = originalSecret.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) @@ -670,7 +675,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = originalSecret.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) @@ -716,7 +721,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { 
treq.Spec.BoundObjectRef.UID = originalSecret.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, []string{fmt.Sprintf("requested expiration of %d seconds shortened to %d seconds", tooLongExpirationTime, maxExpirationSeconds)}) @@ -745,9 +750,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Log("get token") warningHandler.clear() - tokenRequest, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken( - context.TODO(), - sa.Name, + tokenRequest, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ Audiences: []string{"api"}, diff --git a/test/integration/clustercidr/ipam_test.go b/test/integration/clustercidr/ipam_test.go index 1f6a1798856..d083cb8db1f 100644 --- a/test/integration/clustercidr/ipam_test.go +++ b/test/integration/clustercidr/ipam_test.go @@ -43,11 +43,14 @@ import ( ) func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() // set the feature gate to enable MultiCIDRRangeAllocator defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -61,9 +64,6 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -130,10 +130,14 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
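[Illustrative aside, not part of the patch: the CreateToken conversions above all thread the same TokenRequest through the converted call. A consolidated sketch of one bound-token request on the test context; the helper name and parameters are placeholders, while the client-go and authentication/v1 types are the ones svcaccttoken_test.go already imports.]

func requestBoundToken(ctx context.Context, t *testing.T, cs clientset.Interface, sa *v1.ServiceAccount, pod *v1.Pod) *authenticationv1.TokenRequest {
	treq := &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences: []string{"api"},
			// Binding the token to a pod: name and UID must match the live object,
			// which is what the wrong-uid/no-uid cases above exercise.
			BoundObjectRef: &authenticationv1.BoundObjectReference{
				Kind:       "Pod",
				APIVersion: "v1",
				Name:       pod.Name,
				UID:        pod.UID,
			},
		},
	}
	treq, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return treq
}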
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -147,9 +151,6 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -207,10 +208,14 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -224,9 +229,6 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -303,10 +305,14 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
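[Illustrative aside, not part of the patch: in each IPAM test the locally built context.WithCancel(context.Background()) is dropped and the controller plus informers run on the test context, so the test's cancel() stops them. clientSet and sharedInformer are the values these tests already construct; booststrapMultiCIDRRangeAllocator is the helper defined in this file.]

ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer)

go ipamController.Run(ctx)       // exits when the test's deferred cancel() fires
sharedInformer.Start(ctx.Done()) // informer goroutines share the test's lifetime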
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -320,9 +326,6 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -388,10 +391,14 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTieBreak(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -405,9 +412,6 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTieBreak(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) diff --git a/test/integration/controlplane/synthetic_controlplane_test.go b/test/integration/controlplane/synthetic_controlplane_test.go index 443c27170ed..6efc4988834 100644 --- a/test/integration/controlplane/synthetic_controlplane_test.go +++ b/test/integration/controlplane/synthetic_controlplane_test.go @@ -46,6 +46,7 @@ import ( kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -189,7 +190,11 @@ func TestStatus(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(options *options.ServerRunOptions) { if tc.modifyOptions != nil { tc.modifyOptions(options) diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index fd175b4491a..bfd2e43f010 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -39,7 +39,6 @@ import ( "k8s.io/client-go/tools/events" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/retry" - "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -50,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/profile" labelsutil "k8s.io/kubernetes/pkg/util/labels" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) var zero = int64(0) @@ -59,6 +59,9 @@ func setup(t *testing.T) (context.Context, 
kubeapiservertesting.TearDownFunc, *d } func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + modifyServerRunOptions := serverSetup.ModifyServerRunOptions serverSetup.ModifyServerRunOptions = func(opts *options.ServerRunOptions) { if modifyServerRunOptions != nil { @@ -73,11 +76,9 @@ func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) ( ) } - clientSet, config, closeFn := framework.StartTestServer(t, serverSetup) + clientSet, config, closeFn := framework.StartTestServer(ctx, t, serverSetup) resyncPeriod := 12 * time.Hour - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "daemonset-informers")), resyncPeriod) dc, err := daemon.NewDaemonSetsController( ctx, @@ -487,13 +488,13 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } defer cleanupDaemonSets(t, clientset, ds) - _, err = nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -520,7 +521,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -622,7 +623,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) { }, } - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -657,7 +658,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -668,7 +669,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { node.Status.Conditions = []v1.NodeCondition{ {Type: v1.NodeReady, Status: v1.ConditionFalse}, } - _, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -699,7 +700,7 @@ func TestInsufficientCapacityNode(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m") ds.Spec.UpdateStrategy = *strategy - ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -708,7 +709,7 @@ func TestInsufficientCapacityNode(t *testing.T) { node := 
newNode("node-with-limited-memory", nil) node.Status.Allocatable = allocatableResources("100M", "200m") - _, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -727,7 +728,7 @@ func TestInsufficientCapacityNode(t *testing.T) { node1 := newNode("node-with-enough-memory", nil) node1.Status.Allocatable = allocatableResources("200M", "2000m") - _, err = nodeClient.Create(context.TODO(), node1, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, node1, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -754,7 +755,7 @@ func TestLaunchWithHashCollision(t *testing.T) { go dc.Run(ctx, 2) // Create single node - _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) + _, err := nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -768,7 +769,7 @@ func TestLaunchWithHashCollision(t *testing.T) { MaxUnavailable: &oneIntString, }, } - ds, err := dsClient.Create(context.TODO(), orgDs, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, orgDs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -779,7 +780,7 @@ func TestLaunchWithHashCollision(t *testing.T) { t.Fatalf("Failed to create DaemonSet: %v", err) } - ds, err = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = dsClient.Get(ctx, ds.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get DaemonSet: %v", err) } @@ -790,7 +791,7 @@ func TestLaunchWithHashCollision(t *testing.T) { // Look up the ControllerRevision for the DaemonSet _, name := hashAndNameForDaemonSet(ds) - revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) + revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil || revision == nil { t.Fatalf("Failed to look up ControllerRevision: %v", err) } @@ -812,7 +813,7 @@ func TestLaunchWithHashCollision(t *testing.T) { Data: revision.Data, Revision: revision.Revision + 1, } - _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), newRevision, metav1.CreateOptions{}) + _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, newRevision, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create ControllerRevision: %v", err) } @@ -859,7 +860,7 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { go dc.Run(ctx, 2) // Create single node - _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) + _, err := nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -873,7 +874,7 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { MaxUnavailable: &oneIntString, }, } - ds, err := dsClient.Create(context.TODO(), orgDs, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, orgDs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -884,14 +885,14 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { t.Fatalf("Failed to create DaemonSet: %v", err) } - ds, err = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = dsClient.Get(ctx, ds.Name, 
metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get DaemonSet: %v", err) } // Look up the ControllerRevision for the DaemonSet _, name := hashAndNameForDaemonSet(ds) - revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) + revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil || revision == nil { t.Fatalf("Failed to look up ControllerRevision: %v", err) } @@ -913,14 +914,14 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { Data: revision.Data, Revision: revision.Revision + 1, } - _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), newRevision, metav1.CreateOptions{}) + _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, newRevision, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create ControllerRevision: %v", err) } t.Logf("revision: %v", newName) // ensure the daemonset to be synced - _, err = nodeClient.Create(context.TODO(), newNode("second-node", nil), metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, newNode("second-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -946,7 +947,7 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { } err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { - revs, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).List(context.TODO(), metav1.ListOptions{}) + revs, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list controllerrevision: %v", err) } @@ -990,7 +991,7 @@ func TestTaintedNode(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -999,13 +1000,13 @@ func TestTaintedNode(t *testing.T) { nodeWithTaint := newNode("node-with-taint", nil) nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}} - _, err = nodeClient.Create(context.TODO(), nodeWithTaint, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, nodeWithTaint, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create nodeWithTaint: %v", err) } nodeWithoutTaint := newNode("node-without-taint", nil) - _, err = nodeClient.Create(context.TODO(), nodeWithoutTaint, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, nodeWithoutTaint, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create nodeWithoutTaint: %v", err) } @@ -1014,13 +1015,13 @@ func TestTaintedNode(t *testing.T) { validateDaemonSetStatus(dsClient, ds.Name, 1, t) // remove taint from nodeWithTaint - nodeWithTaint, err = nodeClient.Get(context.TODO(), "node-with-taint", metav1.GetOptions{}) + nodeWithTaint, err = nodeClient.Get(ctx, "node-with-taint", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to retrieve nodeWithTaint: %v", err) } nodeWithTaintCopy := nodeWithTaint.DeepCopy() nodeWithTaintCopy.Spec.Taints = []v1.Taint{} - _, err = nodeClient.Update(context.TODO(), nodeWithTaintCopy, metav1.UpdateOptions{}) + _, err = nodeClient.Update(ctx, nodeWithTaintCopy, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Failed to update nodeWithTaint: %v", err) } @@ -1119,7 +1120,7 @@ func 
TestUpdateStatusDespitePodCreationFailure(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } diff --git a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go index 39ee9adbf95..d9f24cd5509 100644 --- a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go +++ b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go @@ -20,16 +20,21 @@ import ( "context" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestAdmission(t *testing.T) { - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(cfg *controlplane.Config) { cfg.GenericConfig.EnableProfiling = true cfg.GenericConfig.AdmissionControl = defaulttolerationseconds.NewDefaultTolerationSeconds() @@ -55,7 +60,7 @@ func TestAdmission(t *testing.T) { }, } - updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(ctx, &pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating pod: %v", err) } diff --git a/test/integration/dualstack/dualstack_endpoints_test.go b/test/integration/dualstack/dualstack_endpoints_test.go index 35beff8ee3e..24c2c02f4ed 100644 --- a/test/integration/dualstack/dualstack_endpoints_test.go +++ b/test/integration/dualstack/dualstack_endpoints_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/endpointslice" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestDualStackEndpoints(t *testing.T) { @@ -43,7 +44,11 @@ func TestDualStackEndpoints(t *testing.T) { return map[string]string{"foo": "bar"} } - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. @@ -54,7 +59,7 @@ func TestDualStackEndpoints(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -83,7 +88,7 @@ func TestDualStackEndpoints(t *testing.T) { }, }, } - if _, err := client.CoreV1().Nodes().Create(context.TODO(), testNode, metav1.CreateOptions{}); err != nil { + if _, err := client.CoreV1().Nodes().Create(ctx, testNode, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) } @@ -103,8 +108,6 @@ func TestDualStackEndpoints(t *testing.T) { client, 1*time.Second) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // Start informer and controllers informers.Start(ctx.Done()) // use only one worker to serialize the updates @@ -166,7 +169,7 @@ func TestDualStackEndpoints(t *testing.T) { }, } - createdPod, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod %s: %v", pod.Name, err) } @@ -177,7 +180,7 @@ func TestDualStackEndpoints(t *testing.T) { Phase: v1.PodRunning, PodIPs: []v1.PodIP{{IP: podIPbyFamily[v1.IPv4Protocol]}, {IP: podIPbyFamily[v1.IPv6Protocol]}}, } - _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), createdPod, metav1.UpdateOptions{}) + _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) } @@ -205,7 +208,7 @@ func TestDualStackEndpoints(t *testing.T) { } // create a service - _, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err = client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating service: %v", err) } @@ -214,7 +217,7 @@ func TestDualStackEndpoints(t *testing.T) { // legacy endpoints are not dual stack // and use the address of the first IP family if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { - e, err := client.CoreV1().Endpoints(ns.Name).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + e, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Logf("Error fetching endpoints: %v", err) return false, nil @@ -236,7 +239,7 @@ func TestDualStackEndpoints(t *testing.T) { // wait until the endpoint slices are created err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { lSelector := discovery.LabelServiceName + "=" + svc.Name - esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: lSelector}) + esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector}) if err != nil { t.Logf("Error listing EndpointSlices: %v", err) return false, nil diff --git a/test/integration/dualstack/dualstack_test.go b/test/integration/dualstack/dualstack_test.go index b052d1c45d9..e2752b1a72e 100644 --- a/test/integration/dualstack/dualstack_test.go +++ b/test/integration/dualstack/dualstack_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/cmd/kube-apiserver/app/options" 
"k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" netutils "k8s.io/utils/net" ) @@ -45,7 +46,11 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { // Create an IPv4 single stack control-plane serviceCIDR := "10.0.0.0/16" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -54,7 +59,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -247,7 +252,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -256,7 +261,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -272,7 +277,11 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { // Create an IPv6 only dual stack control-plane serviceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10") @@ -282,7 +291,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -462,7 +471,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -471,7 +480,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -488,7 +497,11 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -497,7 +510,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -724,7 +737,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -733,7 +746,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -751,7 +764,11 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { serviceCIDR := "2001:db8:1::/112" secondaryServiceCIDR := "10.0.0.0/16" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10") @@ -761,7 +778,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -772,7 +789,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { // verify client is working if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { - _, err := client.CoreV1().Endpoints("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { t.Logf("error fetching endpoints: %v", err) return false, nil @@ -943,7 +960,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -952,7 +969,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -970,7 +987,11 @@ func TestUpgradeDowngrade(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -979,7 +1000,7 @@ func TestUpgradeDowngrade(t *testing.T) { // Wait until the default "kubernetes" service is created. 
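[Illustrative aside, not part of the patch: a short sketch of the upgrade step the upgrade/downgrade test drives below, on the test context. svc and client are the test's own variables; the IPFamilyPolicy constant is from k8s.io/api/core/v1.]

requireDualStack := v1.IPFamilyPolicyRequireDualStack
svc.Spec.IPFamilyPolicy = &requireDualStack

// On a dual-stack apiserver this update allocates a second ClusterIP, one per IP family.
upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{})
if err != nil {
	t.Fatalf("unexpected error upgrading service to dual stack. %v", err)
}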
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1006,12 +1027,12 @@ func TestUpgradeDowngrade(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error while creating service:%v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1023,7 +1044,7 @@ func TestUpgradeDowngrade(t *testing.T) { // upgrade it requireDualStack := v1.IPFamilyPolicyRequireDualStack svc.Spec.IPFamilyPolicy = &requireDualStack - upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error upgrading service to dual stack. %v", err) } @@ -1036,7 +1057,7 @@ func TestUpgradeDowngrade(t *testing.T) { upgraded.Spec.IPFamilyPolicy = &singleStack upgraded.Spec.ClusterIPs = upgraded.Spec.ClusterIPs[0:1] upgraded.Spec.IPFamilies = upgraded.Spec.IPFamilies[0:1] - downgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), upgraded, metav1.UpdateOptions{}) + downgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, upgraded, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error downgrading service to single stack. %v", err) } @@ -1046,7 +1067,7 @@ func TestUpgradeDowngrade(t *testing.T) { // run test again this time without removing secondary IPFamily or ClusterIP downgraded.Spec.IPFamilyPolicy = &requireDualStack - upgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), downgraded, metav1.UpdateOptions{}) + upgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, downgraded, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error upgrading service to dual stack. %v", err) } @@ -1057,7 +1078,7 @@ func TestUpgradeDowngrade(t *testing.T) { upgradedAgain.Spec.IPFamilyPolicy = &singleStack // api-server automatically removes the secondary ClusterIP and IPFamily // when a servie is downgraded. - downgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), upgradedAgain, metav1.UpdateOptions{}) + downgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, upgradedAgain, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error downgrading service to single stack. 
%v", err) } @@ -1073,7 +1094,11 @@ func TestConvertToFromExternalName(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -1082,7 +1107,7 @@ func TestConvertToFromExternalName(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1108,12 +1133,12 @@ func TestConvertToFromExternalName(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error while creating service:%v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1127,7 +1152,7 @@ func TestConvertToFromExternalName(t *testing.T) { svc.Spec.ClusterIP = "" // not clearing ClusterIPs svc.Spec.ExternalName = "something.somewhere" - externalNameSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + externalNameSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error converting service to external name. %v", err) } @@ -1139,7 +1164,7 @@ func TestConvertToFromExternalName(t *testing.T) { // convert to a ClusterIP service externalNameSvc.Spec.Type = v1.ServiceTypeClusterIP externalNameSvc.Spec.ExternalName = "" - clusterIPSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), externalNameSvc, metav1.UpdateOptions{}) + clusterIPSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, externalNameSvc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error converting service to ClusterIP. 
%v", err) } @@ -1154,7 +1179,11 @@ func TestPreferDualStack(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -1163,7 +1192,7 @@ func TestPreferDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1193,12 +1222,12 @@ func TestPreferDualStack(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error while creating service:%v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1209,7 +1238,7 @@ func TestPreferDualStack(t *testing.T) { // update it svc.Spec.Selector = map[string]string{"foo": "bar"} - upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error upgrading service to dual stack. %v", err) } @@ -1227,7 +1256,11 @@ func TestServiceUpdate(t *testing.T) { // Create an IPv4 single stack control-plane serviceCIDR := "10.0.0.0/16" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -1236,7 +1269,7 @@ func TestServiceUpdate(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1262,26 +1295,26 @@ func TestServiceUpdate(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) // if no error was expected validate the service otherwise return if err != nil { t.Errorf("unexpected error creating service:%v", err) return } - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } // update using put svc.Labels = map[string]string{"x": "y"} - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Errorf("Unexpected error updating the service %s %v", svc.Name, err) } - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1296,12 +1329,12 @@ func TestServiceUpdate(t *testing.T) { t.Fatalf("failed to json.Marshal labels: %v", err) } - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(context.TODO(), svc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { t.Fatalf("unexpected error patching service using strategic merge patch. %v", err) } - current, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + current, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1323,17 +1356,16 @@ func TestServiceUpdate(t *testing.T) { t.Fatalf("unexpected error creating json patch. %v", err) } - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(context.TODO(), svc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { t.Fatalf("unexpected error patching service using merge patch. 
%v", err) } // validate the service was created correctly if it was not expected to fail - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } - } // validateServiceAndClusterIPFamily checks that the service has the expected IPFamilies @@ -1386,10 +1418,14 @@ func validateServiceAndClusterIPFamily(svc *v1.Service, expectedIPFamilies []v1. func TestUpgradeServicePreferToDualStack(t *testing.T) { sharedEtcd := framework.SharedEtcd() + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Create an IPv4 only dual stack control-plane serviceCIDR := "192.168.0.0/24" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = serviceCIDR @@ -1398,7 +1434,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1429,12 +1465,12 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1447,7 +1483,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) @@ -1457,7 +1493,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1466,7 +1502,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { t.Fatalf("creating kubernetes service timed out") } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1479,11 +1515,15 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { func TestDowngradeServicePreferToDualStack(t *testing.T) { sharedEtcd := framework.SharedEtcd() + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Create a dual stack control-plane serviceCIDR := "192.168.0.0/24" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) @@ -1492,7 +1532,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1521,12 +1561,12 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { }, } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1537,7 +1577,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { tearDownFn() // reset secondary - client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = serviceCIDR @@ -1547,7 +1587,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1556,7 +1596,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { t.Fatalf("creating kubernetes service timed out") } // validate the service is still there. - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1576,7 +1616,11 @@ type specMergePatch struct { // tests success when converting ClusterIP:Headless service to ExternalName func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) { - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() ns := framework.CreateNamespaceOrDie(client, "test-service-allocate-node-ports", t) @@ -1594,7 +1638,7 @@ func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) { } var err error - service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{}) + service, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating test service: %v", err) } @@ -1610,7 +1654,7 @@ func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) { t.Fatalf("failed to json.Marshal ports: %v", err) } - _, err = client.CoreV1().Services(ns.Name).Patch(context.TODO(), service.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.CoreV1().Services(ns.Name).Patch(ctx, service.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { t.Fatalf("unexpected error patching service using strategic merge patch. 
%v", err) } diff --git a/test/integration/examples/webhook_test.go b/test/integration/examples/webhook_test.go index 0c7f04be765..6dcf2c309f7 100644 --- a/test/integration/examples/webhook_test.go +++ b/test/integration/examples/webhook_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/controlplane/reconcilers" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestWebhookLoopback(t *testing.T) { @@ -41,7 +42,11 @@ func TestWebhookLoopback(t *testing.T) { called := int32(0) - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { }, ModifyServerConfig: func(config *controlplane.Config) { @@ -67,7 +72,7 @@ func TestWebhookLoopback(t *testing.T) { fail := admissionregistrationv1.Fail noSideEffects := admissionregistrationv1.SideEffectClassNone - _, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(ctx, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "webhooktest.example.com"}, Webhooks: []admissionregistrationv1.MutatingWebhook{{ Name: "webhooktest.example.com", @@ -88,7 +93,7 @@ func TestWebhookLoopback(t *testing.T) { } err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) { - _, err = client.CoreV1().ConfigMaps("default").Create(context.TODO(), &v1.ConfigMap{ + _, err = client.CoreV1().ConfigMaps("default").Create(ctx, &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "webhook-test"}, Data: map[string]string{"invalid key": "value"}, }, metav1.CreateOptions{}) diff --git a/test/integration/framework/test_server.go b/test/integration/framework/test_server.go index 1fea953a983..ce1599ea544 100644 --- a/test/integration/framework/test_server.go +++ b/test/integration/framework/test_server.go @@ -59,13 +59,14 @@ type TestServerSetup struct { type TearDownFunc func() // StartTestServer runs a kube-apiserver, optionally calling out to the setup.ModifyServerRunOptions and setup.ModifyServerConfig functions -func StartTestServer(t testing.TB, setup TestServerSetup) (client.Interface, *rest.Config, TearDownFunc) { +func StartTestServer(ctx context.Context, t testing.TB, setup TestServerSetup) (client.Interface, *rest.Config, TearDownFunc) { + ctx, cancel := context.WithCancel(ctx) + certDir, err := os.MkdirTemp("", "test-integration-"+strings.ReplaceAll(t.Name(), "/", "_")) if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) } - ctx, cancel := context.WithCancel(context.Background()) var errCh chan error tearDownFn := func() { // Calling cancel function is stopping apiserver and cleaning up diff --git a/test/integration/ipamperf/ipam_test.go b/test/integration/ipamperf/ipam_test.go index 2380ddcbb55..33986da2e1a 100644 --- a/test/integration/ipamperf/ipam_test.go +++ b/test/integration/ipamperf/ipam_test.go @@ -123,7 +123,11 @@ func TestPerformance(t *testing.T) { t.Skip("Skipping because we want to run short tests") } - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := 
context.WithCancel(ctx) + defer cancel() + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} diff --git a/test/integration/network/services_test.go b/test/integration/network/services_test.go index 5a93a5d856a..294d640bbaa 100644 --- a/test/integration/network/services_test.go +++ b/test/integration/network/services_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // TestServicesFinalizersRepairLoop tests that Services participate in the object @@ -41,7 +42,11 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { clusterIP := "10.0.0.20" interval := 5 * time.Second - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -53,7 +58,7 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { // verify client is working if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { - _, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { t.Logf("error fetching endpoints: %v", err) return false, nil @@ -82,20 +87,20 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { } // Create service - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), &svcNodePort, metav1.CreateOptions{}); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil { t.Errorf("unexpected error creating service: %v", err) } t.Logf("Created service: %s", svcNodePort.Name) // Check the service has been created correctly - svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{}) + svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, metav1.GetOptions{}) if err != nil || svc.Spec.ClusterIP != clusterIP { t.Errorf("created service is not correct: %v", err) } t.Logf("Service created successfully: %v", svc) // Delete service - if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(context.TODO(), svcNodePort.Name, metav1.DeleteOptions{}); err != nil { + if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(ctx, svcNodePort.Name, metav1.DeleteOptions{}); err != nil { t.Errorf("unexpected error deleting service: %v", err) } t.Logf("Deleted service: %s", svcNodePort.Name) @@ -104,26 +109,26 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { time.Sleep(interval + 1) // Check that the service was not deleted and the IP is already allocated - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, 
metav1.GetOptions{}) if err != nil || svc.Spec.ClusterIP != clusterIP { t.Errorf("created service is not correct: %v", err) } t.Logf("Service after Delete: %v", svc) // Remove the finalizer - if _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(context.TODO(), svcNodePort.Name, types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`), metav1.PatchOptions{}); err != nil { + if _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svcNodePort.Name, types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`), metav1.PatchOptions{}); err != nil { t.Errorf("unexpected error removing finalizer: %v", err) } t.Logf("Removed service finalizer: %s", svcNodePort.Name) // Check that the service was deleted - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, metav1.GetOptions{}) if err == nil { t.Errorf("service was not delete: %v", err) } // Try to create service again - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), &svcNodePort, metav1.CreateOptions{}); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil { t.Errorf("unexpected error creating service: %v", err) } t.Logf("Created service: %s", svcNodePort.Name) @@ -133,7 +138,11 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { func TestServiceCIDR28bits(t *testing.T) { serviceCIDR := "10.0.0.0/28" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -142,7 +151,7 @@ func TestServiceCIDR28bits(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { return false, err } @@ -169,7 +178,7 @@ func TestServiceCIDR28bits(t *testing.T) { }, } - _, err := client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating test service: %v", err) } diff --git a/test/integration/node/lifecycle_test.go b/test/integration/node/lifecycle_test.go index 497517fda5f..5346c29e01e 100644 --- a/test/integration/node/lifecycle_test.go +++ b/test/integration/node/lifecycle_test.go @@ -17,7 +17,6 @@ limitations under the License. package node import ( - "context" "fmt" "testing" "time" @@ -104,9 +103,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)() testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil) - - // Build clientset and informers for controllers. 
- defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet // Build clientset and informers for controllers. @@ -263,7 +259,6 @@ func TestTaintBasedEvictions(t *testing.T) { podTolerations.SetExternalKubeClientSet(externalClientset) podTolerations.SetExternalKubeInformerFactory(externalInformers) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet // Start NodeLifecycleController for taint. @@ -325,7 +320,7 @@ func TestTaintBasedEvictions(t *testing.T) { }) } nodes = append(nodes, node) - if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create node: %q, err: %v", klog.KObj(node), err) } } @@ -337,7 +332,7 @@ func TestTaintBasedEvictions(t *testing.T) { test.pod.Spec.Tolerations[0].TolerationSeconds = &test.tolerationSeconds } - test.pod, err = cs.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{}) + test.pod, err = cs.CoreV1().Pods(testCtx.NS.Name).Create(testCtx.Ctx, test.pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Test Failed: error: %q, while creating pod %q", err, klog.KObj(test.pod)) } @@ -349,7 +344,7 @@ func TestTaintBasedEvictions(t *testing.T) { if test.pod != nil { err = wait.PollImmediate(time.Second, time.Second*15, func() (bool, error) { - pod, err := cs.CoreV1().Pods(test.pod.Namespace).Get(context.TODO(), test.pod.Name, metav1.GetOptions{}) + pod, err := cs.CoreV1().Pods(test.pod.Namespace).Get(testCtx.Ctx, test.pod.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -364,10 +359,10 @@ func TestTaintBasedEvictions(t *testing.T) { return false, nil }) if err != nil { - pod, _ := cs.CoreV1().Pods(testCtx.NS.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{}) + pod, _ := cs.CoreV1().Pods(testCtx.NS.Name).Get(testCtx.Ctx, test.pod.Name, metav1.GetOptions{}) t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.expectedWaitForPodCondition, pod) } - testutils.CleanupPods(cs, t, []*v1.Pod{test.pod}) + testutils.CleanupPods(testCtx.Ctx, cs, t, []*v1.Pod{test.pod}) } testutils.CleanupNodes(cs, t) }) diff --git a/test/integration/podgc/podgc_test.go b/test/integration/podgc/podgc_test.go index c3e5d15ecf1..58798943c06 100644 --- a/test/integration/podgc/podgc_test.go +++ b/test/integration/podgc/podgc_test.go @@ -77,7 +77,6 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) { t.Run(name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)() testCtx := setup(t, "podgc-orphaned") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet node := &v1.Node{ @@ -180,7 +179,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true)() testCtx := setup(t, "podgc-out-of-service") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet node := &v1.Node{ diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index d7c02dab7ad..c5e7c55b06a 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -60,8 +60,12 @@ const ( // 
quota_test.go:100: Took 4.196205966s to scale up without quota // quota_test.go:115: Took 12.021640372s to scale up with quota func TestQuota(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Set up a API server - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -76,10 +80,6 @@ func TestQuota(t *testing.T) { ns2 := framework.CreateNamespaceOrDie(clientset, "non-quotaed", t) defer framework.DeleteNamespaceOrDie(clientset, ns2, t) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) rm := replicationcontroller.NewReplicationManager( informers.Core().V1().Pods(), @@ -290,8 +290,12 @@ plugins: t.Fatal(err) } + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Set up an API server - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -306,10 +310,6 @@ plugins: ns := framework.CreateNamespaceOrDie(clientset, "quota", t) defer framework.DeleteNamespaceOrDie(clientset, ns, t) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) rm := replicationcontroller.NewReplicationManager( informers.Core().V1().Pods(), @@ -417,8 +417,12 @@ plugins: t.Fatal(err) } + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Set up an API server - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -433,10 +437,6 @@ plugins: ns := framework.CreateNamespaceOrDie(clientset, "quota", t) defer framework.DeleteNamespaceOrDie(clientset, ns, t) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) rm := replicationcontroller.NewReplicationManager( informers.Core().V1().Pods(), diff --git a/test/integration/scheduler/bind/bind_test.go b/test/integration/scheduler/bind/bind_test.go index a633660aaea..a1b6c1693f9 100644 --- a/test/integration/scheduler/bind/bind_test.go +++ b/test/integration/scheduler/bind/bind_test.go @@ -28,9 +28,7 @@ import ( // TestDefaultBinder tests the binding process in the scheduler. 
func TestDefaultBinder(t *testing.T) { testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, "", nil), 0) - testutil.SyncInformerFactory(testCtx) - // Do not start scheduler routine. - defer testutil.CleanupTest(t, testCtx) + testutil.SyncSchedulerInformerFactory(testCtx) // Add a node. node, err := testutil.CreateNode(testCtx.ClientSet, st.MakeNode().Name("testnode").Obj()) @@ -56,7 +54,7 @@ func TestDefaultBinder(t *testing.T) { if err != nil { t.Fatalf("Failed to create pod: %v", err) } - defer testutil.CleanupPods(testCtx.ClientSet, t, []*corev1.Pod{pod}) + defer testutil.CleanupPods(testCtx.Ctx, testCtx.ClientSet, t, []*corev1.Pod{pod}) podCopy := pod.DeepCopy() if tc.anotherUID { diff --git a/test/integration/scheduler/extender/extender_test.go b/test/integration/scheduler/extender/extender_test.go index 76698aa40ae..1ba4a5614cb 100644 --- a/test/integration/scheduler/extender/extender_test.go +++ b/test/integration/scheduler/extender/extender_test.go @@ -354,9 +354,8 @@ func TestSchedulerExtender(t *testing.T) { } testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithExtenders(extenders...)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) DoTestPodScheduling(testCtx.NS, t, clientSet) } diff --git a/test/integration/scheduler/filters/filters_test.go b/test/integration/scheduler/filters/filters_test.go index a2f5aee00c3..07b08d704e9 100644 --- a/test/integration/scheduler/filters/filters_test.go +++ b/test/integration/scheduler/filters/filters_test.go @@ -35,6 +35,7 @@ import ( st "k8s.io/kubernetes/pkg/scheduler/testing" testutils "k8s.io/kubernetes/test/integration/util" imageutils "k8s.io/kubernetes/test/utils/image" + "k8s.io/kubernetes/test/utils/ktesting" "k8s.io/utils/pointer" ) @@ -66,7 +67,6 @@ var ( // anti-affinity predicate functions works correctly. 
func TestInterPodAffinity(t *testing.T) { testCtx := initTest(t, "") - defer testutils.CleanupTest(t, testCtx) // Add a few nodes with labels nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2) @@ -822,11 +822,13 @@ func TestInterPodAffinity(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + for _, pod := range test.pods { if pod.Namespace == "" { pod.Namespace = defaultNS } - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error while creating pod: %v", err) } @@ -839,7 +841,7 @@ func TestInterPodAffinity(t *testing.T) { test.pod.Namespace = defaultNS } - testPod, err := cs.CoreV1().Pods(test.pod.Namespace).Create(context.TODO(), test.pod, metav1.CreateOptions{}) + testPod, err := cs.CoreV1().Pods(test.pod.Namespace).Create(ctx, test.pod, metav1.CreateOptions{}) if err != nil { if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) { t.Fatalf("Error while creating pod: %v", err) @@ -855,20 +857,22 @@ func TestInterPodAffinity(t *testing.T) { t.Errorf("Error while trying to fit a pod: %v", err) } - err = cs.CoreV1().Pods(test.pod.Namespace).Delete(context.TODO(), test.pod.Name, *metav1.NewDeleteOptions(0)) + err = cs.CoreV1().Pods(test.pod.Namespace).Delete(ctx, test.pod.Name, *metav1.NewDeleteOptions(0)) if err != nil { t.Errorf("Error while deleting pod: %v", err) } - err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, testCtx.NS.Name, test.pod.Name)) + err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, true, + testutils.PodDeleted(ctx, cs, testCtx.NS.Name, test.pod.Name)) if err != nil { t.Errorf("Error while waiting for pod to get deleted: %v", err) } for _, pod := range test.pods { - err = cs.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)) + err = cs.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)) if err != nil { t.Errorf("Error while deleting pod: %v", err) } - err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, pod.Namespace, pod.Name)) + err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, true, + testutils.PodDeleted(ctx, cs, pod.Namespace, pod.Name)) if err != nil { t.Errorf("Error while waiting for pod to get deleted: %v", err) } @@ -990,7 +994,6 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testCtx := initTest(t, "") - defer testutils.CleanupTest(t, testCtx) // Add a few nodes with labels nodes, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2) @@ -1009,7 +1012,7 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) { } defaultNS := "ns1" - createdPod, err := cs.CoreV1().Pods(test.existingPod.Namespace).Create(context.TODO(), test.existingPod, metav1.CreateOptions{}) + createdPod, err := cs.CoreV1().Pods(test.existingPod.Namespace).Create(testCtx.Ctx, test.existingPod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error while creating pod: %v", err) } @@ -1022,7 +1025,7 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) { test.pod.Namespace = defaultNS } - testPod, err := 
cs.CoreV1().Pods(test.pod.Namespace).Create(context.TODO(), test.pod, metav1.CreateOptions{}) + testPod, err := cs.CoreV1().Pods(test.pod.Namespace).Create(testCtx.Ctx, test.pod, metav1.CreateOptions{}) if err != nil { if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) { t.Fatalf("Error while creating pod: %v", err) @@ -1037,20 +1040,21 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) { if err != nil { t.Errorf("Error while trying to fit a pod: %v", err) } - - err = cs.CoreV1().Pods(test.pod.Namespace).Delete(context.TODO(), test.pod.Name, *metav1.NewDeleteOptions(0)) + err = cs.CoreV1().Pods(test.pod.Namespace).Delete(testCtx.Ctx, test.pod.Name, *metav1.NewDeleteOptions(0)) if err != nil { t.Errorf("Error while deleting pod: %v", err) } - err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, testCtx.NS.Name, test.pod.Name)) + err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, true, + testutils.PodDeleted(testCtx.Ctx, cs, testCtx.NS.Name, test.pod.Name)) if err != nil { t.Errorf("Error while waiting for pod to get deleted: %v", err) } - err = cs.CoreV1().Pods(test.existingPod.Namespace).Delete(context.TODO(), test.existingPod.Name, *metav1.NewDeleteOptions(0)) + err = cs.CoreV1().Pods(test.existingPod.Namespace).Delete(testCtx.Ctx, test.existingPod.Name, *metav1.NewDeleteOptions(0)) if err != nil { t.Errorf("Error while deleting pod: %v", err) } - err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, test.existingPod.Namespace, test.existingPod.Name)) + err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, true, + testutils.PodDeleted(testCtx.Ctx, cs, test.existingPod.Namespace, test.existingPod.Name)) if err != nil { t.Errorf("Error while waiting for pod to get deleted: %v", err) } @@ -1492,7 +1496,6 @@ func TestPodTopologySpreadFilter(t *testing.T) { testCtx := initTest(t, "pts-predicate") cs := testCtx.ClientSet ns := testCtx.NS.Name - defer testutils.CleanupTest(t, testCtx) for i := range tt.nodes { if _, err := createNode(cs, tt.nodes[i]); err != nil { @@ -1507,10 +1510,10 @@ func TestPodTopologySpreadFilter(t *testing.T) { tt.incomingPod.SetNamespace(ns) allPods := append(tt.existingPods, tt.incomingPod) - defer testutils.CleanupPods(cs, t, allPods) + defer testutils.CleanupPods(testCtx.Ctx, cs, t, allPods) for _, pod := range tt.existingPods { - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(testCtx.Ctx, pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error while creating pod during test: %v", err) } @@ -1519,7 +1522,7 @@ func TestPodTopologySpreadFilter(t *testing.T) { t.Errorf("Error while waiting for pod during test: %v", err) } } - testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod, metav1.CreateOptions{}) + testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(testCtx.Ctx, tt.incomingPod, metav1.CreateOptions{}) if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Error while creating pod during test: %v", err) } @@ -1761,7 +1764,6 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tt.enableReadWriteOncePod)() testCtx := initTest(t, "scheduler-informer") - defer testutils.CleanupTest(t, testCtx) if tt.init != nil { if err := 
tt.init(testCtx.ClientSet, testCtx.NS.Name); err != nil { diff --git a/test/integration/scheduler/plugins/plugins_test.go b/test/integration/scheduler/plugins/plugins_test.go index df7e4b5ae1c..2ed0eb76264 100644 --- a/test/integration/scheduler/plugins/plugins_test.go +++ b/test/integration/scheduler/plugins/plugins_test.go @@ -214,13 +214,6 @@ func (sp *ScorePlugin) Name() string { return scorePluginName } -// reset returns name of the score plugin. -func (sp *ScorePlugin) reset() { - sp.failScore = false - sp.numScoreCalled = 0 - sp.highScoreNode = "" -} - // Score returns the score of scheduling a pod on a specific node. func (sp *ScorePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) { curCalled := atomic.AddInt32(&sp.numScoreCalled, 1) @@ -246,12 +239,6 @@ func (sp *ScoreWithNormalizePlugin) Name() string { return scoreWithNormalizePluginName } -// reset returns name of the score plugin. -func (sp *ScoreWithNormalizePlugin) reset() { - sp.numScoreCalled = 0 - sp.numNormalizeScoreCalled = 0 -} - // Score returns the score of scheduling a pod on a specific node. func (sp *ScoreWithNormalizePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) { sp.numScoreCalled++ @@ -273,15 +260,6 @@ func (fp *FilterPlugin) Name() string { return filterPluginName } -// reset is used to reset filter plugin. -func (fp *FilterPlugin) reset() { - fp.numFilterCalled = 0 - fp.failFilter = false - if fp.numCalledPerPod != nil { - fp.numCalledPerPod = make(map[string]int) - } -} - // Filter is a test function that returns an error or nil, depending on the // value of "failFilter". func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { @@ -328,13 +306,6 @@ func (rp *ReservePlugin) Unreserve(ctx context.Context, state *framework.CycleSt } } -// reset used to reset internal counters. -func (rp *ReservePlugin) reset() { - rp.numReserveCalled = 0 - rp.numUnreserveCalled = 0 - rp.failReserve = false -} - // Name returns name of the plugin. func (*PreScorePlugin) Name() string { return preScorePluginName @@ -350,12 +321,6 @@ func (pfp *PreScorePlugin) PreScore(ctx context.Context, _ *framework.CycleState return nil } -// reset used to reset prescore plugin. -func (pfp *PreScorePlugin) reset() { - pfp.numPreScoreCalled = 0 - pfp.failPreScore = false -} - // Name returns name of the plugin. func (pp *PreBindPlugin) Name() string { return preBindPluginName @@ -377,15 +342,6 @@ func (pp *PreBindPlugin) PreBind(ctx context.Context, state *framework.CycleStat return nil } -// reset used to reset prebind plugin. -func (pp *PreBindPlugin) reset() { - pp.numPreBindCalled = 0 - pp.failPreBind = false - pp.rejectPreBind = false - pp.succeedOnRetry = false - pp.podUIDs = make(map[types.UID]struct{}) -} - const bindPluginAnnotation = "bindPluginName" func (bp *BindPlugin) Name() string { @@ -411,11 +367,6 @@ func (bp *BindPlugin) Bind(ctx context.Context, state *framework.CycleState, p * return bp.bindStatus } -// reset used to reset numBindCalled. -func (bp *BindPlugin) reset() { - bp.numBindCalled = 0 -} - // Name returns name of the plugin. func (pp *PostBindPlugin) Name() string { return pp.name @@ -429,11 +380,6 @@ func (pp *PostBindPlugin) PostBind(ctx context.Context, state *framework.CycleSt } } -// reset used to reset postbind plugin. 
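The filters_test.go hunks above also migrate polling from wait.Poll to the context-aware wait.PollUntilContextTimeout, which takes the poll context, an explicit immediate flag, and a condition function that itself receives a context. A before/after sketch of that change, assuming cs, ns, podName, and pollInterval from the surrounding test; the inline condition stands in for testutils.PodDeleted and is not its actual implementation:

// Before: the condition cannot observe cancellation and uses context.TODO() internally.
err := wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
	_, err := cs.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
	return apierrors.IsNotFound(err), nil
})

// After: immediate=true polls right away, and the poll context flows into the API call,
// so the wait aborts as soon as the test context is cancelled.
err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, true,
	func(ctx context.Context) (bool, error) {
		_, err := cs.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
		return apierrors.IsNotFound(err), nil
	})
if err != nil {
	t.Errorf("error while waiting for pod to get deleted: %v", err)
}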
-func (pp *PostBindPlugin) reset() { - pp.numPostBindCalled = 0 -} - // Name returns name of the plugin. func (pp *PreFilterPlugin) Name() string { return prefilterPluginName @@ -456,13 +402,6 @@ func (pp *PreFilterPlugin) PreFilter(ctx context.Context, state *framework.Cycle return nil, nil } -// reset used to reset prefilter plugin. -func (pp *PreFilterPlugin) reset() { - pp.numPreFilterCalled = 0 - pp.failPreFilter = false - pp.rejectPreFilter = false -} - // Name returns name of the plugin. func (pp *PostFilterPlugin) Name() string { return pp.name @@ -551,31 +490,9 @@ func (pp *PermitPlugin) rejectAllPods() { pp.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { wp.Reject(pp.name, "rejectAllPods") }) } -// reset used to reset permit plugin. -func (pp *PermitPlugin) reset() { - pp.numPermitCalled = 0 - pp.failPermit = false - pp.rejectPermit = false - pp.timeoutPermit = false - pp.waitAndRejectPermit = false - pp.waitAndAllowPermit = false - pp.cancelled = false - pp.waitingPod = "" - pp.allowingPod = "" - pp.rejectingPod = "" -} - // TestPreFilterPlugin tests invocation of prefilter plugins. func TestPreFilterPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a pre-filter plugin. - preFilterPlugin := &PreFilterPlugin{} - registry, prof := initRegistryAndConfig(t, preFilterPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "prefilter-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "prefilter-plugin", nil) tests := []struct { name string @@ -601,6 +518,15 @@ func TestPreFilterPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a pre-filter plugin. + preFilterPlugin := &PreFilterPlugin{} + registry, prof := initRegistryAndConfig(t, preFilterPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + preFilterPlugin.failPreFilter = test.fail preFilterPlugin.rejectPreFilter = test.reject // Create a best effort pod. @@ -627,9 +553,6 @@ func TestPreFilterPlugin(t *testing.T) { if preFilterPlugin.numPreFilterCalled == 0 { t.Errorf("Expected the prefilter plugin to be called.") } - - preFilterPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -713,8 +636,9 @@ func TestPostFilterPlugin(t *testing.T) { } var postFilterPluginName2 = postfilterPluginName + "2" + testContext := testutils.InitTestAPIServer(t, "post-filter", nil) - for i, tt := range tests { + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Create a plugin registry for testing. Register a combination of filter and postFilter plugin. var ( @@ -771,15 +695,11 @@ func TestPostFilterPlugin(t *testing.T) { }, }}}) - // Create the API server and the scheduler with the test plugin set. 
- testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, fmt.Sprintf("postfilter%v-", i), nil), - int(tt.numNodes), + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, int(tt.numNodes), scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry), ) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name})) @@ -820,14 +740,7 @@ func TestPostFilterPlugin(t *testing.T) { // TestScorePlugin tests invocation of score plugins. func TestScorePlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a score plugin. - scorePlugin := &ScorePlugin{} - registry, prof := initRegistryAndConfig(t, scorePlugin) - - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "score-plugin", nil), 10, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "score-plugin", nil) tests := []struct { name string @@ -845,6 +758,15 @@ func TestScorePlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a score plugin. + scorePlugin := &ScorePlugin{} + registry, prof := initRegistryAndConfig(t, scorePlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 10, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + scorePlugin.failScore = test.fail // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, @@ -873,9 +795,6 @@ func TestScorePlugin(t *testing.T) { if numScoreCalled := atomic.LoadInt32(&scorePlugin.numScoreCalled); numScoreCalled == 0 { t.Errorf("Expected the score plugin to be called.") } - - scorePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -886,12 +805,10 @@ func TestNormalizeScorePlugin(t *testing.T) { scoreWithNormalizePlugin := &ScoreWithNormalizePlugin{} registry, prof := initRegistryAndConfig(t, scoreWithNormalizePlugin) - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "score-plugin", nil), 10, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "score-plugin", nil), 10, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) - // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name})) @@ -909,21 +826,11 @@ func TestNormalizeScorePlugin(t *testing.T) { if scoreWithNormalizePlugin.numNormalizeScoreCalled == 0 { t.Error("Expected the normalize score plugin to be called") } - - scoreWithNormalizePlugin.reset() } // TestReservePlugin tests invocation of reserve plugins. func TestReservePluginReserve(t *testing.T) { - // Create a plugin registry for testing. Register only a reserve plugin. - reservePlugin := &ReservePlugin{} - registry, prof := initRegistryAndConfig(t, reservePlugin) - - // Create the API server and the scheduler with the test plugin set. 
- testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "reserve-plugin-reserve", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "reserve-plugin-reserve", nil) tests := []struct { name string @@ -941,6 +848,15 @@ func TestReservePluginReserve(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a reserve plugin. + reservePlugin := &ReservePlugin{} + registry, prof := initRegistryAndConfig(t, reservePlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + reservePlugin.failReserve = test.fail // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, @@ -963,57 +879,15 @@ func TestReservePluginReserve(t *testing.T) { if reservePlugin.numReserveCalled == 0 { t.Errorf("Expected the reserve plugin to be called.") } - - reservePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPrebindPlugin tests invocation of prebind plugins. func TestPrebindPlugin(t *testing.T) { - // Create a plugin registry for testing. Register a prebind and a filter plugin. - preBindPlugin := &PreBindPlugin{podUIDs: make(map[types.UID]struct{})} - filterPlugin := &FilterPlugin{} - registry := frameworkruntime.Registry{ - preBindPluginName: newPlugin(preBindPlugin), - filterPluginName: newPlugin(filterPlugin), - } + testContext := testutils.InitTestAPIServer(t, "prebind-plugin", nil) - // Setup initial prebind and filter plugin in different profiles. - // The second profile ensures the embedded filter plugin is exclusively called, and hence - // we can use its internal `numFilterCalled` to perform some precise checking logic. - cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ - Profiles: []configv1.KubeSchedulerProfile{ - { - SchedulerName: pointer.String(v1.DefaultSchedulerName), - Plugins: &configv1.Plugins{ - PreBind: configv1.PluginSet{ - Enabled: []configv1.Plugin{ - {Name: preBindPluginName}, - }, - }, - }, - }, - { - SchedulerName: pointer.String("2nd-scheduler"), - Plugins: &configv1.Plugins{ - Filter: configv1.PluginSet{ - Enabled: []configv1.Plugin{ - {Name: filterPluginName}, - }, - }, - }, - }, - }, - }) - - // Create the API server and the scheduler with the test plugin set. nodesNum := 2 - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "prebind-plugin", nil), nodesNum, - scheduler.WithProfiles(cfg.Profiles...), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) tests := []struct { name string @@ -1051,19 +925,59 @@ func TestPrebindPlugin(t *testing.T) { { name: "failure on preBind moves unschedulable pods", fail: true, - unschedulablePod: st.MakePod().Name("unschedulable-pod").Namespace(testCtx.NS.Name).Container(imageutils.GetPauseImageName()).Obj(), + unschedulablePod: st.MakePod().Name("unschedulable-pod").Namespace(testContext.NS.Name).Container(imageutils.GetPauseImageName()).Obj(), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register a prebind and a filter plugin. 
+ preBindPlugin := &PreBindPlugin{podUIDs: make(map[types.UID]struct{})} + filterPlugin := &FilterPlugin{} + registry := frameworkruntime.Registry{ + preBindPluginName: newPlugin(preBindPlugin), + filterPluginName: newPlugin(filterPlugin), + } + + // Setup initial prebind and filter plugin in different profiles. + // The second profile ensures the embedded filter plugin is exclusively called, and hence + // we can use its internal `numFilterCalled` to perform some precise checking logic. + cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ + Profiles: []configv1.KubeSchedulerProfile{ + { + SchedulerName: pointer.String(v1.DefaultSchedulerName), + Plugins: &configv1.Plugins{ + PreBind: configv1.PluginSet{ + Enabled: []configv1.Plugin{ + {Name: preBindPluginName}, + }, + }, + }, + }, + { + SchedulerName: pointer.String("2nd-scheduler"), + Plugins: &configv1.Plugins{ + Filter: configv1.PluginSet{ + Enabled: []configv1.Plugin{ + {Name: filterPluginName}, + }, + }, + }, + }, + }, + }) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, nodesNum, + scheduler.WithProfiles(cfg.Profiles...), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + if p := test.unschedulablePod; p != nil { p.Spec.SchedulerName = "2nd-scheduler" filterPlugin.rejectFilter = true if _, err := createPausePod(testCtx.ClientSet, p); err != nil { t.Fatalf("Error while creating an unschedulable pod: %v", err) } - defer filterPlugin.reset() } preBindPlugin.failPreBind = test.fail @@ -1105,9 +1019,6 @@ func TestPrebindPlugin(t *testing.T) { t.Errorf("Timed out waiting for the unschedulable Pod to be retried at least twice.") } } - - preBindPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -1201,6 +1112,8 @@ func TestUnReserveReservePlugins(t *testing.T) { }, } + testContext := testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil) + for _, test := range tests { t.Run(test.name, func(t *testing.T) { var pls []framework.Plugin @@ -1209,14 +1122,10 @@ func TestUnReserveReservePlugins(t *testing.T) { } registry, prof := initRegistryAndConfig(t, pls...) - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil), - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. podName := "test-pod" @@ -1260,13 +1169,14 @@ func TestUnReserveReservePlugins(t *testing.T) { } } } - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestUnReservePermitPlugins tests unreserve of Permit plugins. func TestUnReservePermitPlugins(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil) + tests := []struct { name string plugin *PermitPlugin @@ -1305,14 +1215,10 @@ func TestUnReservePermitPlugins(t *testing.T) { } registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...) - // Create the API server and the scheduler with the test plugin set. 
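The scheduler plugin tests above are restructured so that the shared API server (testContext) is created once per test function, while the plugin instances, registry, profile, and scheduler are built fresh inside every subtest; that per-subtest construction is what makes the removed reset() helpers unnecessary. A minimal sketch of the per-subtest shape, with the plugin type, namespace prefix, and node count as placeholders:

testContext := testutils.InitTestAPIServer(t, "example-plugin", nil)

for _, test := range tests {
	t.Run(test.name, func(t *testing.T) {
		// A fresh plugin instance per subtest means no cross-subtest counters to reset.
		plugin := &FilterPlugin{}
		registry, prof := initRegistryAndConfig(t, plugin)

		// The returned teardown replaces the per-subtest CleanupTest/CleanupPods calls
		// that this patch removes.
		testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2,
			scheduler.WithProfiles(prof),
			scheduler.WithFrameworkOutOfTreeRegistry(registry))
		defer teardown()

		// ... create pods via testCtx.ClientSet and assert on the plugin's counters ...
	})
}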
- testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, "unreserve-reserve-plugin", nil), - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(profile), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. podName := "test-pod" @@ -1345,14 +1251,14 @@ func TestUnReservePermitPlugins(t *testing.T) { if test.plugin.numPermitCalled != 1 { t.Errorf("Expected the Permit plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestUnReservePreBindPlugins tests unreserve of Prebind plugins. func TestUnReservePreBindPlugins(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "unreserve-prebind-plugin", nil) + tests := []struct { name string plugin *PreBindPlugin @@ -1381,14 +1287,10 @@ func TestUnReservePreBindPlugins(t *testing.T) { } registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...) - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest( - t, - testutils.InitTestAPIServer(t, "unreserve-prebind-plugin", nil), - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(profile), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a pause pod. podName := "test-pod" @@ -1421,14 +1323,14 @@ func TestUnReservePreBindPlugins(t *testing.T) { if test.plugin.numPreBindCalled != 1 { t.Errorf("Expected the Prebind plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestUnReserveBindPlugins tests unreserve of Bind plugins. func TestUnReserveBindPlugins(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "unreserve-bind-plugin", nil) + tests := []struct { name string plugin *BindPlugin @@ -1454,17 +1356,12 @@ func TestUnReserveBindPlugins(t *testing.T) { } registry, profile := initRegistryAndConfig(t, []framework.Plugin{test.plugin, reservePlugin}...) - apiCtx := testutils.InitTestAPIServer(t, "unreserve-bind-plugin", nil) - test.plugin.client = apiCtx.ClientSet + test.plugin.client = testContext.ClientSet - // Create the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest( - t, - apiCtx, - 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(profile), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a pause pod. podName := "test-pod" @@ -1497,8 +1394,6 @@ func TestUnReserveBindPlugins(t *testing.T) { if test.plugin.numBindCalled != 1 { t.Errorf("Expected the Bind plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -1508,61 +1403,16 @@ type pluginInvokeEvent struct { val int } -// TestBindPlugin tests invocation of bind plugins. 
func TestBindPlugin(t *testing.T) { + + var ( + bindPlugin1Name = "bind-plugin-1" + bindPlugin2Name = "bind-plugin-2" + reservePluginName = "mock-reserve-plugin" + postBindPluginName = "mock-post-bind-plugin" + ) + testContext := testutils.InitTestAPIServer(t, "bind-plugin", nil) - bindPlugin1 := &BindPlugin{name: "bind-plugin-1", client: testContext.ClientSet} - bindPlugin2 := &BindPlugin{name: "bind-plugin-2", client: testContext.ClientSet} - reservePlugin := &ReservePlugin{name: "mock-reserve-plugin"} - postBindPlugin := &PostBindPlugin{name: "mock-post-bind-plugin"} - // Create a plugin registry for testing. Register reserve, bind, and - // postBind plugins. - - registry := frameworkruntime.Registry{ - reservePlugin.Name(): newPlugin(reservePlugin), - bindPlugin1.Name(): newPlugin(bindPlugin1), - bindPlugin2.Name(): newPlugin(bindPlugin2), - postBindPlugin.Name(): newPlugin(postBindPlugin), - } - - // Setup initial unreserve and bind plugins for testing. - cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ - Profiles: []configv1.KubeSchedulerProfile{{ - SchedulerName: pointer.String(v1.DefaultSchedulerName), - Plugins: &configv1.Plugins{ - MultiPoint: configv1.PluginSet{ - Disabled: []configv1.Plugin{ - {Name: defaultbinder.Name}, - }, - }, - Reserve: configv1.PluginSet{ - Enabled: []configv1.Plugin{{Name: reservePlugin.Name()}}, - }, - Bind: configv1.PluginSet{ - // Put DefaultBinder last. - Enabled: []configv1.Plugin{{Name: bindPlugin1.Name()}, {Name: bindPlugin2.Name()}, {Name: defaultbinder.Name}}, - Disabled: []configv1.Plugin{{Name: defaultbinder.Name}}, - }, - PostBind: configv1.PluginSet{ - Enabled: []configv1.Plugin{{Name: postBindPlugin.Name()}}, - }, - }, - }}, - }) - - // Create the scheduler with the test plugin set. - testCtx := testutils.InitTestSchedulerWithOptions(t, testContext, 0, - scheduler.WithProfiles(cfg.Profiles...), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - testutils.SyncInformerFactory(testCtx) - go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) - - // Add a few nodes. 
- _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode(), 2) - if err != nil { - t.Fatal(err) - } tests := []struct { name string @@ -1576,32 +1426,77 @@ func TestBindPlugin(t *testing.T) { name: "bind plugins skipped to bind the pod and scheduler bond the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Skip, ""), framework.NewStatus(framework.Skip, "")}, expectBoundByScheduler: true, - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: bindPlugin2.Name(), val: 1}, {pluginName: postBindPlugin.Name(), val: 1}}, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: bindPlugin2Name, val: 1}, {pluginName: postBindPluginName, val: 1}}, }, { name: "bindplugin2 succeeded to bind the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Skip, ""), framework.NewStatus(framework.Success, "")}, expectBoundByPlugin: true, - expectBindPluginName: bindPlugin2.Name(), - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: bindPlugin2.Name(), val: 1}, {pluginName: postBindPlugin.Name(), val: 1}}, + expectBindPluginName: bindPlugin2Name, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: bindPlugin2Name, val: 1}, {pluginName: postBindPluginName, val: 1}}, }, { name: "bindplugin1 succeeded to bind the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Success, ""), framework.NewStatus(framework.Success, "")}, expectBoundByPlugin: true, - expectBindPluginName: bindPlugin1.Name(), - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: postBindPlugin.Name(), val: 1}}, + expectBindPluginName: bindPlugin1Name, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: postBindPluginName, val: 1}}, }, { name: "bind plugin fails to bind the pod", bindPluginStatuses: []*framework.Status{framework.NewStatus(framework.Error, "failed to bind"), framework.NewStatus(framework.Success, "")}, - expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1.Name(), val: 1}, {pluginName: reservePlugin.Name(), val: 1}}, + expectInvokeEvents: []pluginInvokeEvent{{pluginName: bindPlugin1Name, val: 1}, {pluginName: reservePluginName, val: 1}}, }, } var pluginInvokeEventChan chan pluginInvokeEvent for _, test := range tests { t.Run(test.name, func(t *testing.T) { + bindPlugin1 := &BindPlugin{name: bindPlugin1Name, client: testContext.ClientSet} + bindPlugin2 := &BindPlugin{name: bindPlugin2Name, client: testContext.ClientSet} + reservePlugin := &ReservePlugin{name: reservePluginName} + postBindPlugin := &PostBindPlugin{name: postBindPluginName} + + // Create a plugin registry for testing. Register reserve, bind, and + // postBind plugins. + registry := frameworkruntime.Registry{ + reservePlugin.Name(): newPlugin(reservePlugin), + bindPlugin1.Name(): newPlugin(bindPlugin1), + bindPlugin2.Name(): newPlugin(bindPlugin2), + postBindPlugin.Name(): newPlugin(postBindPlugin), + } + + // Setup initial unreserve and bind plugins for testing. 
+ cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ + Profiles: []configv1.KubeSchedulerProfile{{ + SchedulerName: pointer.String(v1.DefaultSchedulerName), + Plugins: &configv1.Plugins{ + MultiPoint: configv1.PluginSet{ + Disabled: []configv1.Plugin{ + {Name: defaultbinder.Name}, + }, + }, + Reserve: configv1.PluginSet{ + Enabled: []configv1.Plugin{{Name: reservePlugin.Name()}}, + }, + Bind: configv1.PluginSet{ + // Put DefaultBinder last. + Enabled: []configv1.Plugin{{Name: bindPlugin1.Name()}, {Name: bindPlugin2.Name()}, {Name: defaultbinder.Name}}, + Disabled: []configv1.Plugin{{Name: defaultbinder.Name}}, + }, + PostBind: configv1.PluginSet{ + Enabled: []configv1.Plugin{{Name: postBindPlugin.Name()}}, + }, + }, + }}, + }) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(cfg.Profiles...), + scheduler.WithFrameworkOutOfTreeRegistry(registry), + ) + defer teardown() + pluginInvokeEventChan = make(chan pluginInvokeEvent, 10) bindPlugin1.bindStatus = test.bindPluginStatuses[0] @@ -1678,17 +1573,14 @@ func TestBindPlugin(t *testing.T) { t.Errorf("Waiting for invoke event %d timeout.", j) } } - postBindPlugin.reset() - bindPlugin1.reset() - bindPlugin2.reset() - reservePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPostBindPlugin tests invocation of postbind plugins. func TestPostBindPlugin(t *testing.T) { + testContext := testutils.InitTestAPIServer(t, "postbind-plugin", nil) + tests := []struct { name string preBindFail bool @@ -1716,11 +1608,10 @@ func TestPostBindPlugin(t *testing.T) { } registry, prof := initRegistryAndConfig(t, preBindPlugin, postBindPlugin) - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "postbind-plugin", nil), 2, + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + defer teardown() // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, @@ -1749,23 +1640,13 @@ func TestPostBindPlugin(t *testing.T) { t.Errorf("Expected the postbind plugin to be called, was called %d times.", postBindPlugin.numPostBindCalled) } } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPermitPlugin tests invocation of permit plugins. func TestPermitPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a permit plugin. - perPlugin := &PermitPlugin{name: permitPluginName} - registry, prof := initRegistryAndConfig(t, perPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "permit-plugin", nil) tests := []struct { name string @@ -1813,6 +1694,16 @@ func TestPermitPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + + // Create a plugin registry for testing. Register only a permit plugin. 
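+ // Building the registry and profile inside the subtest keeps plugin state from
+ // leaking between cases, so the explicit reset() and CleanupPods calls are no longer needed.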
+ perPlugin := &PermitPlugin{name: permitPluginName} + registry, prof := initRegistryAndConfig(t, perPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + perPlugin.failPermit = test.fail perPlugin.rejectPermit = test.reject perPlugin.timeoutPermit = test.timeout @@ -1844,9 +1735,6 @@ func TestPermitPlugin(t *testing.T) { if perPlugin.numPermitCalled == 0 { t.Errorf("Expected the permit plugin to be called.") } - - perPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -1859,10 +1747,9 @@ func TestMultiplePermitPlugins(t *testing.T) { registry, prof := initRegistryAndConfig(t, perPlugin1, perPlugin2) // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "multi-permit-plugin", nil), 2, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "multi-permit-plugin", nil), 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) // Both permit plugins will return Wait for permitting perPlugin1.timeoutPermit = true @@ -1902,8 +1789,6 @@ func TestMultiplePermitPlugins(t *testing.T) { if perPlugin1.numPermitCalled == 0 || perPlugin2.numPermitCalled == 0 { t.Errorf("Expected the permit plugin to be called.") } - - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) } // TestPermitPluginsCancelled tests whether all permit plugins are cancelled when pod is rejected. @@ -1914,10 +1799,9 @@ func TestPermitPluginsCancelled(t *testing.T) { registry, prof := initRegistryAndConfig(t, perPlugin1, perPlugin2) // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugins", nil), 2, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugins", nil), 2, scheduler.WithProfiles(prof), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) // Both permit plugins will return Wait for permitting perPlugin1.timeoutPermit = true @@ -1950,16 +1834,7 @@ func TestPermitPluginsCancelled(t *testing.T) { // TestCoSchedulingWithPermitPlugin tests invocation of permit plugins. func TestCoSchedulingWithPermitPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a permit plugin. - permitPlugin := &PermitPlugin{name: permitPluginName} - registry, prof := initRegistryAndConfig(t, permitPlugin) - - // Create the API server and the scheduler with the test plugin set. - // TODO Make the subtests not share scheduler instances. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "permit-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "permit-plugin", nil) tests := []struct { name string @@ -1980,6 +1855,16 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + + // Create a plugin registry for testing. Register only a permit plugin. 
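+ // Each subtest now runs against its own scheduler instance, which addresses the
+ // removed TODO above about subtests sharing a single scheduler.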
+ permitPlugin := &PermitPlugin{name: permitPluginName} + registry, prof := initRegistryAndConfig(t, permitPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + permitPlugin.failPermit = false permitPlugin.rejectPermit = false permitPlugin.timeoutPermit = false @@ -2028,24 +1913,13 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) { if permitPlugin.numPermitCalled == 0 { t.Errorf("Expected the permit plugin to be called.") } - - permitPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{podA, podB}) }) } } // TestFilterPlugin tests invocation of filter plugins. func TestFilterPlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a filter plugin. - filterPlugin := &FilterPlugin{} - registry, prof := initRegistryAndConfig(t, filterPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "filter-plugin", nil), 1, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "filter-plugin", nil) tests := []struct { name string @@ -2063,6 +1937,15 @@ func TestFilterPlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a filter plugin. + filterPlugin := &FilterPlugin{} + registry, prof := initRegistryAndConfig(t, filterPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 1, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + filterPlugin.failFilter = test.fail // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, @@ -2086,24 +1969,13 @@ func TestFilterPlugin(t *testing.T) { t.Errorf("Expected the filter plugin to be called 1 time, but got %v.", filterPlugin.numFilterCalled) } } - - filterPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } // TestPreScorePlugin tests invocation of pre-score plugins. func TestPreScorePlugin(t *testing.T) { - // Create a plugin registry for testing. Register only a pre-score plugin. - preScorePlugin := &PreScorePlugin{} - registry, prof := initRegistryAndConfig(t, preScorePlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "pre-score-plugin", nil), 2, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "pre-score-plugin", nil) tests := []struct { name string @@ -2121,6 +1993,15 @@ func TestPreScorePlugin(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a pre-score plugin. + preScorePlugin := &PreScorePlugin{} + registry, prof := initRegistryAndConfig(t, preScorePlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 2, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + preScorePlugin.failPreScore = test.fail // Create a best effort pod. 
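+ // (it has no resource requests, so either of the two test nodes can host it)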
pod, err := createPausePod(testCtx.ClientSet, @@ -2142,9 +2023,6 @@ func TestPreScorePlugin(t *testing.T) { if preScorePlugin.numPreScoreCalled == 0 { t.Errorf("Expected the pre-score plugin to be called.") } - - preScorePlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -2153,17 +2031,7 @@ func TestPreScorePlugin(t *testing.T) { func TestPreEnqueuePlugin(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodSchedulingReadiness, true)() - // Create a plugin registry for testing. Register only a filter plugin. - enqueuePlugin := &PreEnqueuePlugin{} - // Plumb a preFilterPlugin to verify if it's called or not. - preFilterPlugin := &PreFilterPlugin{} - registry, prof := initRegistryAndConfig(t, enqueuePlugin, preFilterPlugin) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "enqueue-plugin", nil), 1, - scheduler.WithProfiles(prof), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) + testContext := testutils.InitTestAPIServer(t, "enqueue-plugin", nil) tests := []struct { name string @@ -2172,18 +2040,29 @@ func TestPreEnqueuePlugin(t *testing.T) { }{ { name: "pod is admitted to enqueue", - pod: st.MakePod().Name("p").Namespace(testCtx.NS.Name).Container("pause").Obj(), + pod: st.MakePod().Name("p").Namespace(testContext.NS.Name).Container("pause").Obj(), admitEnqueue: true, }, { name: "pod is not admitted to enqueue", - pod: st.MakePod().Name("p").Namespace(testCtx.NS.Name).SchedulingGates([]string{"foo"}).Container("pause").Obj(), + pod: st.MakePod().Name("p").Namespace(testContext.NS.Name).SchedulingGates([]string{"foo"}).Container("pause").Obj(), admitEnqueue: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // Create a plugin registry for testing. Register only a filter plugin. + enqueuePlugin := &PreEnqueuePlugin{} + // Plumb a preFilterPlugin to verify if it's called or not. + preFilterPlugin := &PreFilterPlugin{} + registry, prof := initRegistryAndConfig(t, enqueuePlugin, preFilterPlugin) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 1, + scheduler.WithProfiles(prof), + scheduler.WithFrameworkOutOfTreeRegistry(registry)) + defer teardown() + enqueuePlugin.admit = tt.admitEnqueue // Create a best effort pod. pod, err := createPausePod(testCtx.ClientSet, tt.pod) @@ -2208,9 +2087,6 @@ func TestPreEnqueuePlugin(t *testing.T) { t.Errorf("Expected the preFilter plugin not to be called, but got %v", preFilterPlugin.numPreFilterCalled) } } - - preFilterPlugin.reset() - testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod}) }) } } @@ -2223,61 +2099,9 @@ func TestPreEnqueuePlugin(t *testing.T) { // // - when waitingPods get deleted externally, it'd trigger moving unschedulable Pods func TestPreemptWithPermitPlugin(t *testing.T) { - // Create a plugin registry for testing. Register a permit and a filter plugin. - permitPlugin := &PermitPlugin{} - // Inject a fake filter plugin to use its internal `numFilterCalled` to verify - // how many times a Pod gets tried scheduling. 
- filterPlugin := &FilterPlugin{numCalledPerPod: make(map[string]int)} - registry := frameworkruntime.Registry{ - permitPluginName: newPlugin(permitPlugin), - filterPluginName: newPlugin(filterPlugin), - } + testContext := testutils.InitTestAPIServer(t, "preempt-with-permit-plugin", nil) - // Setup initial permit and filter plugins in the profile. - cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ - Profiles: []configv1.KubeSchedulerProfile{ - { - SchedulerName: pointer.String(v1.DefaultSchedulerName), - Plugins: &configv1.Plugins{ - Permit: configv1.PluginSet{ - Enabled: []configv1.Plugin{ - {Name: permitPluginName}, - }, - }, - Filter: configv1.PluginSet{ - // Ensure the fake filter plugin is always called; otherwise noderesources - // would fail first and exit the Filter phase. - Enabled: []configv1.Plugin{ - {Name: filterPluginName}, - {Name: noderesources.Name}, - }, - Disabled: []configv1.Plugin{ - {Name: noderesources.Name}, - }, - }, - }, - }, - }, - }) - - // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "preempt-with-permit-plugin", nil), 0, - scheduler.WithProfiles(cfg.Profiles...), - scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) - - // Add one node. - nodeRes := map[v1.ResourceName]string{ - v1.ResourcePods: "32", - v1.ResourceCPU: "500m", - v1.ResourceMemory: "500", - } - _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode().Capacity(nodeRes), 1) - if err != nil { - t.Fatal(err) - } - - ns := testCtx.NS.Name + ns := testContext.NS.Name lowPriority, highPriority := int32(100), int32(300) resReq := map[v1.ResourceName]string{ v1.ResourceCPU: "200m", @@ -2288,6 +2112,12 @@ func TestPreemptWithPermitPlugin(t *testing.T) { v1.ResourceMemory: "400", } + nodeRes := map[v1.ResourceName]string{ + v1.ResourcePods: "32", + v1.ResourceCPU: "500m", + v1.ResourceMemory: "500", + } + tests := []struct { name string deleteWaitingPod bool @@ -2320,17 +2150,53 @@ func TestPreemptWithPermitPlugin(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - defer func() { - permitPlugin.reset() - filterPlugin.reset() - var pods []*v1.Pod - for _, p := range []*v1.Pod{tt.runningPod, tt.waitingPod, tt.preemptor} { - if p != nil { - pods = append(pods, p) - } - } - testutils.CleanupPods(testCtx.ClientSet, t, pods) - }() + // Create a plugin registry for testing. Register a permit and a filter plugin. + permitPlugin := &PermitPlugin{} + // Inject a fake filter plugin to use its internal `numFilterCalled` to verify + // how many times a Pod gets tried scheduling. + filterPlugin := &FilterPlugin{numCalledPerPod: make(map[string]int)} + registry := frameworkruntime.Registry{ + permitPluginName: newPlugin(permitPlugin), + filterPluginName: newPlugin(filterPlugin), + } + + // Setup initial permit and filter plugins in the profile. + cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{ + Profiles: []configv1.KubeSchedulerProfile{ + { + SchedulerName: pointer.String(v1.DefaultSchedulerName), + Plugins: &configv1.Plugins{ + Permit: configv1.PluginSet{ + Enabled: []configv1.Plugin{ + {Name: permitPluginName}, + }, + }, + Filter: configv1.PluginSet{ + // Ensure the fake filter plugin is always called; otherwise noderesources + // would fail first and exit the Filter phase. 
+ Enabled: []configv1.Plugin{ + {Name: filterPluginName}, + {Name: noderesources.Name}, + }, + Disabled: []configv1.Plugin{ + {Name: noderesources.Name}, + }, + }, + }, + }, + }, + }) + + testCtx, teardown := initTestSchedulerForFrameworkTest(t, testContext, 0, + scheduler.WithProfiles(cfg.Profiles...), + scheduler.WithFrameworkOutOfTreeRegistry(registry), + ) + defer teardown() + + _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode().Capacity(nodeRes), 1) + if err != nil { + t.Fatal(err) + } permitPlugin.waitAndAllowPermit = true permitPlugin.waitingPod = "waiting-pod" @@ -2502,10 +2368,9 @@ func TestActivatePods(t *testing.T) { }) // Create the API server and the scheduler with the test plugin set. - testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "job-plugin", nil), 1, + testCtx, _ := initTestSchedulerForFrameworkTest(t, testutils.InitTestAPIServer(t, "job-plugin", nil), 1, scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet ns := testCtx.NS.Name @@ -2549,10 +2414,17 @@ func TestActivatePods(t *testing.T) { } } -func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestContext, nodeCount int, opts ...scheduler.Option) *testutils.TestContext { +// The returned shutdown func will delete created resources and scheduler, resources should be those +// that will affect the scheduling result, like nodes, pods, etc.. Namespaces should not be +// deleted here because it's created together with the apiserver, they should be deleted +// simultaneously or we'll have no namespace. +// This should only be called when you want to kill the scheduler alone, away from apiserver. +// For example, in scheduler integration tests, recreating apiserver is performance consuming, +// then shutdown the scheduler and recreate it between each test case is a better approach. +func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestContext, nodeCount int, opts ...scheduler.Option) (*testutils.TestContext, testutils.ShutdownFunc) { testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, opts...) - testutils.SyncInformerFactory(testCtx) - go testCtx.Scheduler.Run(testCtx.Ctx) + testutils.SyncSchedulerInformerFactory(testCtx) + go testCtx.Scheduler.Run(testCtx.SchedulerCtx) if nodeCount > 0 { if _, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode(), nodeCount); err != nil { @@ -2561,7 +2433,28 @@ func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestCont t.Fatal(err) } } - return testCtx + + teardown := func() { + err := testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(testCtx.SchedulerCtx, *metav1.NewDeleteOptions(0), metav1.ListOptions{}) + if err != nil { + t.Errorf("error while deleting all nodes: %v", err) + } + err = testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).DeleteCollection(testCtx.SchedulerCtx, *metav1.NewDeleteOptions(0), metav1.ListOptions{}) + if err != nil { + t.Errorf("error while deleting pod: %v", err) + } + // Wait for all pods to be deleted, or will failed to create same name pods + // required in other test cases. + err = wait.PollUntilContextTimeout(testCtx.SchedulerCtx, time.Millisecond, wait.ForeverTestTimeout, true, + testutils.PodsCleanedUp(testCtx.SchedulerCtx, testCtx.ClientSet, testCtx.NS.Name)) + if err != nil { + t.Errorf("error while waiting for all pods to be deleted: %v", err) + } + // Kill the scheduler. 
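+ // Only the scheduler is stopped here; the API server and the test namespace are
+ // torn down later by the cleanup that InitTestAPIServer registers via t.Cleanup.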
+ testCtx.SchedulerCloseFn() + } + + return testCtx, teardown } // initRegistryAndConfig returns registry and plugins config based on give plugins. diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go index ceac2107db1..f76f9ba21d1 100644 --- a/test/integration/scheduler/preemption/preemption_test.go +++ b/test/integration/scheduler/preemption/preemption_test.go @@ -178,10 +178,9 @@ func TestPreemption(t *testing.T) { 0, scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ @@ -491,7 +490,7 @@ func TestPreemption(t *testing.T) { // Cleanup pods = append(pods, preemptor) - testutils.CleanupPods(cs, t, pods) + testutils.CleanupPods(testCtx.Ctx, cs, t, pods) }) } } @@ -501,7 +500,6 @@ func TestNonPreemption(t *testing.T) { var preemptNever = v1.PreemptNever // Initialize scheduler. testCtx := initTest(t, "non-preemption") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { name string @@ -548,7 +546,7 @@ func TestNonPreemption(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - defer testutils.CleanupPods(cs, t, []*v1.Pod{preemptor, victim}) + defer testutils.CleanupPods(testCtx.Ctx, cs, t, []*v1.Pod{preemptor, victim}) preemptor.Spec.PreemptionPolicy = test.PreemptionPolicy victimPod, err := createPausePod(cs, victim) if err != nil { @@ -579,7 +577,6 @@ func TestNonPreemption(t *testing.T) { func TestDisablePreemption(t *testing.T) { // Initialize scheduler, and disable preemption. testCtx := initTestDisablePreemption(t, "disable-preemption") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { @@ -650,7 +647,7 @@ func TestDisablePreemption(t *testing.T) { // Cleanup pods = append(pods, preemptor) - testutils.CleanupPods(cs, t, pods) + testutils.CleanupPods(testCtx.Ctx, cs, t, pods) }) } } @@ -659,7 +656,6 @@ func TestDisablePreemption(t *testing.T) { func TestPodPriorityResolution(t *testing.T) { admission := priority.NewPlugin() testCtx := testutils.InitTestScheduler(t, testutils.InitTestAPIServer(t, "preemption", admission)) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet // Build clientset and informers for controllers. @@ -671,7 +667,7 @@ func TestPodPriorityResolution(t *testing.T) { admission.SetExternalKubeInformerFactory(externalInformers) // Waiting for all controllers to sync - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) externalInformers.Start(testCtx.Ctx.Done()) externalInformers.WaitForCacheSync(testCtx.Ctx.Done()) @@ -754,7 +750,7 @@ func TestPodPriorityResolution(t *testing.T) { }) }) } - testutils.CleanupPods(cs, t, pods) + testutils.CleanupPods(testCtx.Ctx, cs, t, pods) testutils.CleanupNodes(cs, t) } @@ -780,7 +776,6 @@ func mkPriorityPodWithGrace(tc *testutils.TestContext, name string, priority int func TestPreemptionStarvation(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "preemption") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { @@ -869,7 +864,7 @@ func TestPreemptionStarvation(t *testing.T) { allPods := pendingPods allPods = append(allPods, runningPods...) 
allPods = append(allPods, preemptor) - testutils.CleanupPods(cs, t, allPods) + testutils.CleanupPods(testCtx.Ctx, cs, t, allPods) }) } } @@ -879,7 +874,6 @@ func TestPreemptionStarvation(t *testing.T) { func TestPreemptionRaces(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "preemption-race") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet tests := []struct { @@ -966,7 +960,7 @@ func TestPreemptionRaces(t *testing.T) { klog.Info("Check unschedulable pods still exists and were never scheduled...") for _, p := range additionalPods { - pod, err := cs.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{}) + pod, err := cs.CoreV1().Pods(p.Namespace).Get(testCtx.Ctx, p.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Error in getting Pod %v/%v info: %v", p.Namespace, p.Name, err) } @@ -983,7 +977,7 @@ func TestPreemptionRaces(t *testing.T) { allPods := additionalPods allPods = append(allPods, initialPods...) allPods = append(allPods, preemptor) - testutils.CleanupPods(cs, t, allPods) + testutils.CleanupPods(testCtx.Ctx, cs, t, allPods) } }) } @@ -1136,9 +1130,6 @@ func TestNominatedNodeCleanUp(t *testing.T) { scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(tt.outOfTreeRegistry), ) - t.Cleanup(func() { - testutils.CleanupTest(t, testCtx) - }) cs, ns := testCtx.ClientSet, testCtx.NS.Name // Create a node with the specified capacity. @@ -1227,7 +1218,6 @@ func addPodConditionReady(pod *v1.Pod) { func TestPDBInPreemption(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "preemption-pdb") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet initDisruptionController(t, testCtx) @@ -1471,16 +1461,20 @@ func TestPDBInPreemption(t *testing.T) { // Cleanup pods = append(pods, preemptor) - testutils.CleanupPods(cs, t, pods) - cs.PolicyV1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) - cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + testutils.CleanupPods(testCtx.Ctx, cs, t, pods) + if err := cs.PolicyV1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { + t.Errorf("error while deleting PDBs, error: %v", err) + } + if err := cs.CoreV1().Nodes().DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { + t.Errorf("error whiling deleting nodes, error: %v", err) + } }) } } func initTestPreferNominatedNode(t *testing.T, nsPrefix string, opts ...scheduler.Option) *testutils.TestContext { testCtx := testutils.InitTestSchedulerWithOptions(t, testutils.InitTestAPIServer(t, nsPrefix, nil), 0, opts...) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) // wraps the NextPod() method to make it appear the preemption has been done already and the nominated node has been set. 
f := testCtx.Scheduler.NextPod testCtx.Scheduler.NextPod = func() (podInfo *framework.QueuedPodInfo) { @@ -1561,9 +1555,6 @@ func TestPreferNominatedNode(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testCtx := initTestPreferNominatedNode(t, "perfer-nominated-node") - t.Cleanup(func() { - testutils.CleanupTest(t, testCtx) - }) cs := testCtx.ClientSet nsName := testCtx.NS.Name var err error @@ -1637,10 +1628,9 @@ func TestReadWriteOncePodPreemption(t *testing.T) { testutils.InitTestAPIServer(t, "preemption", nil), 0, scheduler.WithProfiles(cfg.Profiles...)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet storage := v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}} @@ -1921,7 +1911,7 @@ func TestReadWriteOncePodPreemption(t *testing.T) { pods := make([]*v1.Pod, len(test.existingPods)) t.Cleanup(func() { - testutils.CleanupPods(cs, t, pods) + testutils.CleanupPods(testCtx.Ctx, cs, t, pods) if err := test.cleanup(); err != nil { t.Errorf("Error cleaning up test: %v", err) } diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go index 05a788d20df..3261452f6a3 100644 --- a/test/integration/scheduler/queue_test.go +++ b/test/integration/scheduler/queue_test.go @@ -124,8 +124,7 @@ func TestSchedulingGates(t *testing.T) { scheduler.WithPodInitialBackoffSeconds(0), scheduler.WithPodMaxBackoffSeconds(0), ) - testutils.SyncInformerFactory(testCtx) - defer testutils.CleanupTest(t, testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx for _, p := range tt.pods { @@ -186,9 +185,8 @@ func TestCoreResourceEnqueue(t *testing.T) { scheduler.WithPodInitialBackoffSeconds(0), scheduler.WithPodMaxBackoffSeconds(0), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) - defer testutils.CleanupTest(t, testCtx) defer testCtx.Scheduler.SchedulingQueue.Close() cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx @@ -293,8 +291,12 @@ func TestCustomResourceEnqueue(t *testing.T) { testfwk.SharedEtcd(), ) testCtx := &testutils.TestContext{} - testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background()) - testCtx.CloseFn = func() { server.TearDownFn() } + ctx, cancel := context.WithCancel(context.Background()) + testCtx.Ctx = ctx + testCtx.CloseFn = func() { + cancel() + server.TearDownFn() + } apiExtensionClient := apiextensionsclient.NewForConfigOrDie(server.ClientConfig) dynamicClient := dynamic.NewForConfigOrDie(server.ClientConfig) @@ -371,7 +373,7 @@ func TestCustomResourceEnqueue(t *testing.T) { scheduler.WithPodInitialBackoffSeconds(0), scheduler.WithPodMaxBackoffSeconds(0), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) defer testutils.CleanupTest(t, testCtx) diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 2443857671d..649885f2240 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -51,7 +51,6 @@ type nodeStateManager struct { func TestUnschedulableNodes(t *testing.T) { testCtx := initTest(t, "unschedulable-nodes") - defer testutils.CleanupTest(t, testCtx) nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister() // NOTE: This test cannot 
run in parallel, because it is creating and deleting @@ -191,7 +190,6 @@ func TestMultipleSchedulers(t *testing.T) { // 1. create and start default-scheduler testCtx := initTest(t, "multi-scheduler") - defer testutils.CleanupTest(t, testCtx) // 2. create a node node := &v1.Node{ @@ -263,7 +261,7 @@ func TestMultipleSchedulers(t *testing.T) { }, }) testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithProfiles(cfg.Profiles...)) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) // 6. **check point-2**: @@ -285,7 +283,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) { }) testCtx := initTest(t, "multi-scheduler", scheduler.WithProfiles(cfg.Profiles...)) - defer testutils.CleanupTest(t, testCtx) node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"}, @@ -349,7 +346,6 @@ func TestMultipleSchedulingProfiles(t *testing.T) { // This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not. func TestAllocatable(t *testing.T) { testCtx := initTest(t, "allocatable") - defer testutils.CleanupTest(t, testCtx) // 2. create a node without allocatable awareness nodeRes := map[v1.ResourceName]string{ @@ -423,7 +419,6 @@ func TestAllocatable(t *testing.T) { func TestSchedulerInformers(t *testing.T) { // Initialize scheduler. testCtx := initTest(t, "scheduler-informer") - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ @@ -510,9 +505,13 @@ func TestSchedulerInformers(t *testing.T) { // Cleanup pods = append(pods, unschedulable) - testutils.CleanupPods(cs, t, pods) - cs.PolicyV1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) - cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + testutils.CleanupPods(testCtx.Ctx, cs, t, pods) + if err := cs.PolicyV1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { + t.Errorf("error whiling deleting PDBs, error: %v", err) + } + if err := cs.CoreV1().Nodes().DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil { + t.Errorf("error whiling deleting nodes, error: %v", err) + } }) } } @@ -526,7 +525,6 @@ func TestNodeEvents(t *testing.T) { // 4. Remove the taint from node2; pod2 should now schedule on node2 testCtx := initTest(t, "node-events") - defer testutils.CleanupTest(t, testCtx) defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) // 1.1 create pod1 diff --git a/test/integration/scheduler/scoring/priorities_test.go b/test/integration/scheduler/scoring/priorities_test.go index 52e52ebbfb2..d50d4d19996 100644 --- a/test/integration/scheduler/scoring/priorities_test.go +++ b/test/integration/scheduler/scoring/priorities_test.go @@ -17,7 +17,6 @@ limitations under the License. 
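// Across these test files the cleanup pattern changes the same way: the explicit
// `defer testutils.CleanupTest(t, testCtx)` disappears because InitTestAPIServer now
// registers that cleanup via t.Cleanup, and pod cleanup takes the test's context.
// A minimal sketch (illustrative only, not one of the diff hunks; the test name and
// pod list are hypothetical):
func TestSomething(t *testing.T) {
	testCtx := initTest(t, "something") // cleanup is registered for us via t.Cleanup
	cs := testCtx.ClientSet

	var pods []*v1.Pod // pods created over the course of the test
	// CleanupPods now takes a context, so both the deletions and the
	// wait-for-deletion poll are bound to the test's lifetime.
	defer testutils.CleanupPods(testCtx.Ctx, cs, t, pods)
}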
package scoring import ( - "context" "fmt" "strings" "testing" @@ -95,7 +94,7 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *tes 0, scheduler.WithProfiles(cfg.Profiles...), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) return testCtx } @@ -131,7 +130,7 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext 0, scheduler.WithProfiles(cfg.Profiles...), ) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) go testCtx.Scheduler.Run(testCtx.Ctx) return testCtx } @@ -140,7 +139,6 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext // works correctly. func TestNodeResourcesScoring(t *testing.T) { testCtx := initTestSchedulerForNodeResourcesTest(t) - defer testutils.CleanupTest(t, testCtx) // Add a few nodes. _, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity( map[v1.ResourceName]string{ @@ -204,7 +202,6 @@ func TestNodeResourcesScoring(t *testing.T) { // works correctly. func TestNodeAffinityScoring(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name) - defer testutils.CleanupTest(t, testCtx) // Add a few nodes. _, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode(), 4) if err != nil { @@ -324,7 +321,6 @@ func TestPodAffinityScoring(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name) - defer testutils.CleanupTest(t, testCtx) // Add a few nodes. nodesInTopology, err := createAndWaitForNodesInCache(testCtx, "in-topology", st.MakeNode().Label(topologyKey, topologyValue), 5) if err != nil { @@ -369,7 +365,6 @@ func TestPodAffinityScoring(t *testing.T) { // works correctly, i.e., the pod gets scheduled to the node where its container images are ready. func TestImageLocalityScoring(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, imagelocality.Name) - defer testutils.CleanupTest(t, testCtx) // Create a node with the large image. 
// We use a fake large image as the test image used by the pod, which has @@ -602,7 +597,6 @@ func TestPodTopologySpreadScoring(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)() testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet ns := testCtx.NS.Name @@ -619,9 +613,9 @@ func TestPodTopologySpreadScoring(t *testing.T) { tt.incomingPod.SetNamespace(ns) allPods := append(tt.existingPods, tt.incomingPod) - defer testutils.CleanupPods(cs, t, allPods) + defer testutils.CleanupPods(testCtx.Ctx, cs, t, allPods) for _, pod := range tt.existingPods { - createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(testCtx.Ctx, pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } @@ -631,7 +625,7 @@ func TestPodTopologySpreadScoring(t *testing.T) { } } - testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod, metav1.CreateOptions{}) + testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(testCtx.Ctx, tt.incomingPod, metav1.CreateOptions{}) if err != nil && !apierrors.IsInvalid(err) { t.Fatalf("Test Failed: error while creating pod during test: %v", err) } @@ -653,9 +647,6 @@ func TestPodTopologySpreadScoring(t *testing.T) { // The setup has 300 nodes over 3 zones. func TestDefaultPodTopologySpreadScoring(t *testing.T) { testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name) - t.Cleanup(func() { - testutils.CleanupTest(t, testCtx) - }) cs := testCtx.ClientSet ns := testCtx.NS.Name diff --git a/test/integration/scheduler/taint/taint_test.go b/test/integration/scheduler/taint/taint_test.go index a9b2ef6ea46..a848c6d6965 100644 --- a/test/integration/scheduler/taint/taint_test.go +++ b/test/integration/scheduler/taint/taint_test.go @@ -79,7 +79,6 @@ func TestTaintNodeByCondition(t *testing.T) { admission.SetExternalKubeInformerFactory(externalInformers) testCtx = testutils.InitTestScheduler(t, testCtx) - defer testutils.CleanupTest(t, testCtx) cs := testCtx.ClientSet nsName := testCtx.NS.Name @@ -108,7 +107,7 @@ func TestTaintNodeByCondition(t *testing.T) { // Waiting for all controllers to sync externalInformers.Start(testCtx.Ctx.Done()) externalInformers.WaitForCacheSync(testCtx.Ctx.Done()) - testutils.SyncInformerFactory(testCtx) + testutils.SyncSchedulerInformerFactory(testCtx) // Run all controllers go nc.Run(testCtx.Ctx) @@ -525,11 +524,11 @@ func TestTaintNodeByCondition(t *testing.T) { }, } - if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil { t.Errorf("Failed to create node, err: %v", err) } if err := testutils.WaitForNodeTaints(cs, node, test.expectedTaints); err != nil { - node, err = cs.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + node, err = cs.CoreV1().Nodes().Get(testCtx.Ctx, node.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Failed to get node <%s>", node.Name) } @@ -543,7 +542,7 @@ func TestTaintNodeByCondition(t *testing.T) { pod.Name = fmt.Sprintf("%s-%d", pod.Name, i) pod.Spec.Tolerations = p.tolerations - createdPod, err := 
cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(testCtx.Ctx, pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod %s/%s, error: %v", pod.Namespace, pod.Name, err) @@ -564,7 +563,7 @@ func TestTaintNodeByCondition(t *testing.T) { } } - testutils.CleanupPods(cs, t, pods) + testutils.CleanupPods(testCtx.Ctx, cs, t, pods) testutils.CleanupNodes(cs, t) testutils.WaitForSchedulerCacheCleanup(testCtx.Scheduler, t) }) diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go index dc191e2e157..dbfed434ee2 100644 --- a/test/integration/scheduler_perf/util.go +++ b/test/integration/scheduler_perf/util.go @@ -80,7 +80,7 @@ func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSc // Run API server with minimimal logging by default. Can be raised with -v. framework.MinVerbosity = 0 - _, kubeConfig, tearDownFn := framework.StartTestServer(b, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, b, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority"} diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index e9e18e22761..e19e4ab178d 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -49,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/serviceaccount" serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -59,7 +60,11 @@ const ( ) func TestServiceAccountAutoCreate(t *testing.T) { - c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t) defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: %v", err) @@ -68,7 +73,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { ns := "test-service-account-creation" // Create namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("could not create namespace: %v", err) } @@ -80,7 +85,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { } // Delete service account - err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), defaultUser.Name, metav1.DeleteOptions{}) + err = c.CoreV1().ServiceAccounts(ns).Delete(ctx, defaultUser.Name, metav1.DeleteOptions{}) if err != nil { t.Fatalf("Could not delete default serviceaccount: %v", err) } @@ -96,7 +101,11 @@ func TestServiceAccountAutoCreate(t *testing.T) { } func TestServiceAccountTokenAutoMount(t *testing.T) { - c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, _, stopFunc, err := 
startServiceAccountTestServerAndWaitForCaches(ctx, t) defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: %v", err) @@ -105,7 +114,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { ns := "auto-mount-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -123,7 +132,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { }, } - createdPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), &protoPod, metav1.CreateOptions{}) + createdPod, err := c.CoreV1().Pods(ns).Create(ctx, &protoPod, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -138,7 +147,11 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { } func TestServiceAccountTokenAuthentication(t *testing.T) { - c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t) defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: %v", err) @@ -148,19 +161,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { otherns := "other-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "other" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "ro" user in myns - roSA, err := c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) + roSA, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -175,13 +188,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { roClient := clientset.NewForConfigOrDie(&roClientConfig) doServiceAccountAPIRequests(t, roClient, myns, true, true, false) doServiceAccountAPIRequests(t, roClient, otherns, true, false, false) - err = c.CoreV1().Secrets(myns).Delete(context.TODO(), roTokenName, metav1.DeleteOptions{}) + err = c.CoreV1().Secrets(myns).Delete(ctx, roTokenName, metav1.DeleteOptions{}) if err != nil { t.Fatalf("could not delete token: %v", err) } // wait for delete to be observed and reacted to via watch err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - _, err := roClient.CoreV1().Secrets(myns).List(context.TODO(), metav1.ListOptions{}) + _, err := 
roClient.CoreV1().Secrets(myns).List(ctx, metav1.ListOptions{}) if err == nil { t.Logf("token is still valid, waiting") return false, nil @@ -198,7 +211,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { doServiceAccountAPIRequests(t, roClient, myns, false, false, false) // Create "rw" user in myns - rwSA, err := c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}, metav1.CreateOptions{}) + rwSA, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -215,8 +228,12 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { } func TestLegacyServiceAccountTokenTracking(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, kubefeatures.LegacyServiceAccountTokenTracking, true)() - c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t) defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: %v", err) @@ -224,11 +241,11 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { // create service account myns := "auth-ns" - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } - mysa, err := c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) + mysa, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -287,7 +304,7 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { } wg.Wait() dateAfter := time.Now().UTC().Format(dateFormat) - liveSecret, err := c.CoreV1().Secrets(myns).Get(context.TODO(), test.secretName, metav1.GetOptions{}) + liveSecret, err := c.CoreV1().Secrets(myns).Get(ctx, test.secretName, metav1.GetOptions{}) if err != nil { t.Fatalf("Could not get secret: %v", err) } @@ -314,7 +331,7 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { // configmap should exist with 'since' timestamp. 
if err = wait.PollImmediate(time.Millisecond*10, wait.ForeverTestTimeout, func() (bool, error) { dateBefore := time.Now().UTC().Format("2006-01-02") - configMap, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), legacytokentracking.ConfigMapName, metav1.GetOptions{}) + configMap, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, legacytokentracking.ConfigMapName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get %q configmap, err %w", legacytokentracking.ConfigMapDataKey, err) } @@ -334,11 +351,13 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { // startServiceAccountTestServerAndWaitForCaches returns a started server // It is the responsibility of the caller to ensure the returned stopFunc is called -func startServiceAccountTestServerAndWaitForCaches(t *testing.T) (clientset.Interface, *restclient.Config, func(), error) { +func startServiceAccountTestServerAndWaitForCaches(ctx context.Context, t *testing.T) (clientset.Interface, *restclient.Config, func(), error) { var serviceAccountKey interface{} + ctx, cancel := context.WithCancel(ctx) + // Set up a API server - rootClientset, clientConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + rootClientset, clientConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { var err error serviceAccountKey, err = keyutil.PrivateKeyFromFile(opts.ServiceAccountSigningKeyFile) @@ -379,7 +398,6 @@ func startServiceAccountTestServerAndWaitForCaches(t *testing.T) (clientset.Inte }, }) - ctx, cancel := context.WithCancel(context.Background()) stop := func() { cancel() tearDownFn() diff --git a/test/integration/servicecidr/allocator_test.go b/test/integration/servicecidr/allocator_test.go index 45a2523d3b6..5c398ccb2f2 100644 --- a/test/integration/servicecidr/allocator_test.go +++ b/test/integration/servicecidr/allocator_test.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" netutils "k8s.io/utils/net" ) @@ -43,7 +44,11 @@ func TestServiceAlloc(t *testing.T) { // Create an IPv4 single stack control-plane serviceCIDR := "192.168.0.0/29" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -115,7 +120,11 @@ func TestServiceAllocIPAddress(t *testing.T) { serviceCIDR := "2001:db8::/64" defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)() - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10") @@ -140,7 +149,7 @@ func TestServiceAllocIPAddress(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -151,11 +160,11 @@ func TestServiceAllocIPAddress(t *testing.T) { // create 5 random services and check that the Services have an IP associated for i := 0; i < 5; i++ { - svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(i), metav1.CreateOptions{}) + svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc(i), metav1.CreateOptions{}) if err != nil { t.Error(err) } - _, err = client.NetworkingV1alpha1().IPAddresses().Get(context.TODO(), svc.Spec.ClusterIP, metav1.GetOptions{}) + _, err = client.NetworkingV1alpha1().IPAddresses().Get(ctx, svc.Spec.ClusterIP, metav1.GetOptions{}) if err != nil { t.Error(err) } diff --git a/test/integration/statefulset/statefulset_test.go b/test/integration/statefulset/statefulset_test.go index 5d0409327ea..812f86560d3 100644 --- a/test/integration/statefulset/statefulset_test.go +++ b/test/integration/statefulset/statefulset_test.go @@ -351,8 +351,12 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 // add for issue: https://github.com/kubernetes/kubernetes/issues/108837 func TestStatefulSetStatusWithPodFail(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + limitedPodNumber := 2 - c, config, closeFn := framework.StartTestServer(t, framework.TestServerSetup{ + c, config, closeFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { config.GenericConfig.AdmissionControl = &fakePodFailAdmission{ limitedPodNumber: limitedPodNumber, @@ -363,9 +367,6 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { resyncPeriod := 12 * time.Hour informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() ssc := statefulset.NewStatefulSetController( ctx, informers.Core().V1().Pods(), @@ -382,7 +383,7 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { go ssc.Run(ctx, 5) sts := newSTS("sts", ns.Name, 4) - _, err := c.AppsV1().StatefulSets(sts.Namespace).Create(context.TODO(), sts, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(sts.Namespace).Create(ctx, sts, metav1.CreateOptions{}) if err != nil { t.Fatalf("Could not create statefuleSet %s: %v", sts.Name, err) } @@ -390,7 +391,7 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { wantReplicas := limitedPodNumber var gotReplicas int32 if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(context.TODO(), sts.Name, metav1.GetOptions{}) + newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(ctx, sts.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/integration/util/util.go b/test/integration/util/util.go index 68442ab89e0..5497fd48554 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -58,6 +58,7 @@ import ( taintutils "k8s.io/kubernetes/pkg/util/taints" 
"k8s.io/kubernetes/test/integration/framework" imageutils "k8s.io/kubernetes/test/utils/image" + "k8s.io/kubernetes/test/utils/ktesting" "k8s.io/utils/pointer" ) @@ -143,15 +144,22 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface) { // TestContext store necessary context info type TestContext struct { - CloseFn framework.TearDownFunc NS *v1.Namespace ClientSet clientset.Interface KubeConfig *restclient.Config InformerFactory informers.SharedInformerFactory DynInformerFactory dynamicinformer.DynamicSharedInformerFactory Scheduler *scheduler.Scheduler - Ctx context.Context - CancelFn context.CancelFunc + // This is the top context when initializing the test environment. + Ctx context.Context + // CloseFn will stop the apiserver and clean up the resources + // after itself, including shutting down its storage layer. + CloseFn framework.TearDownFunc + // This is the context when initializing scheduler. + SchedulerCtx context.Context + // SchedulerCloseFn will tear down the resources in creating scheduler, + // including the scheduler itself. + SchedulerCloseFn framework.TearDownFunc } // CleanupNodes cleans all nodes which were created during integration test @@ -163,9 +171,9 @@ func CleanupNodes(cs clientset.Interface, t *testing.T) { } // PodDeleted returns true if a pod is not found in the given namespace. -func PodDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { - return func() (bool, error) { - pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) +func PodDeleted(ctx context.Context, c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc { + return func(context.Context) (bool, error) { + pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil } @@ -176,25 +184,37 @@ func PodDeleted(c clientset.Interface, podNamespace, podName string) wait.Condit } } -// SyncInformerFactory starts informer and waits for caches to be synced -func SyncInformerFactory(testCtx *TestContext) { - testCtx.InformerFactory.Start(testCtx.Ctx.Done()) - if testCtx.DynInformerFactory != nil { - testCtx.DynInformerFactory.Start(testCtx.Ctx.Done()) +// PodsCleanedUp returns true if all pods are deleted in the specific namespace. +func PodsCleanedUp(ctx context.Context, c clientset.Interface, namespace string) wait.ConditionWithContextFunc { + return func(context.Context) (bool, error) { + list, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + return len(list.Items) == 0, nil } - testCtx.InformerFactory.WaitForCacheSync(testCtx.Ctx.Done()) +} + +// SyncSchedulerInformerFactory starts informer and waits for caches to be synced +func SyncSchedulerInformerFactory(testCtx *TestContext) { + testCtx.InformerFactory.Start(testCtx.SchedulerCtx.Done()) if testCtx.DynInformerFactory != nil { - testCtx.DynInformerFactory.WaitForCacheSync(testCtx.Ctx.Done()) + testCtx.DynInformerFactory.Start(testCtx.SchedulerCtx.Done()) + } + testCtx.InformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done()) + if testCtx.DynInformerFactory != nil { + testCtx.DynInformerFactory.WaitForCacheSync(testCtx.SchedulerCtx.Done()) } } // CleanupTest cleans related resources which were created during integration test func CleanupTest(t *testing.T, testCtx *TestContext) { - // Kill the scheduler. - testCtx.CancelFn() - // Cleanup nodes. 
 // CleanupTest cleans related resources which were created during integration test
 func CleanupTest(t *testing.T, testCtx *TestContext) {
-	// Kill the scheduler.
-	testCtx.CancelFn()
-	// Cleanup nodes.
-	testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+	// Cleanup nodes and namespaces.
+	if err := testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(testCtx.Ctx, *metav1.NewDeleteOptions(0), metav1.ListOptions{}); err != nil {
+		t.Errorf("error while cleaning up nodes, error: %v", err)
+	}
 	framework.DeleteNamespaceOrDie(testCtx.ClientSet, testCtx.NS, t)
+	// Terminate the scheduler and apiserver.
 	testCtx.CloseFn()
 }
@@ -215,16 +235,16 @@ func RemovePodFinalizers(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
 }
 
 // CleanupPods deletes the given pods and waits for them to be actually deleted.
-func CleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
+func CleanupPods(ctx context.Context, cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
 	for _, p := range pods {
-		err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, *metav1.NewDeleteOptions(0))
+		err := cs.CoreV1().Pods(p.Namespace).Delete(ctx, p.Name, *metav1.NewDeleteOptions(0))
 		if err != nil && !apierrors.IsNotFound(err) {
 			t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err)
 		}
 	}
 	for _, p := range pods {
-		if err := wait.Poll(time.Millisecond, wait.ForeverTestTimeout,
-			PodDeleted(cs, p.Namespace, p.Name)); err != nil {
+		if err := wait.PollUntilContextTimeout(ctx, time.Duration(time.Microsecond.Seconds()), wait.ForeverTestTimeout, true,
+			PodDeleted(ctx, cs, p.Namespace, p.Name)); err != nil {
 			t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
 		}
 	}
@@ -330,14 +350,14 @@ func UpdateNodeStatus(cs clientset.Interface, node *v1.Node) error {
 
 // InitTestAPIServer initializes a test environment and creates an API server with default
 // configuration.
+// It registers cleanup functions to t.Cleanup(), they will be called when the test completes,
+// no need to do this again.
 func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	testCtx := TestContext{
-		Ctx:      ctx,
-		CancelFn: cancelFunc,
-	}
+	_, ctx := ktesting.NewTestContext(t)
+	ctx, cancel := context.WithCancel(ctx)
+	testCtx := TestContext{Ctx: ctx}
 
-	testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(t, framework.TestServerSetup{
+	testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{
 		ModifyServerRunOptions: func(options *options.ServerRunOptions) {
 			options.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority", "StorageObjectInUseProtection"}
 		},
@@ -348,12 +368,22 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf
 		},
 	})
 
+	oldCloseFn := testCtx.CloseFn
+	testCtx.CloseFn = func() {
+		cancel()
+		oldCloseFn()
+	}
+
 	if nsPrefix != "default" {
 		testCtx.NS = framework.CreateNamespaceOrDie(testCtx.ClientSet, nsPrefix+string(uuid.NewUUID()), t)
 	} else {
 		testCtx.NS = framework.CreateNamespaceOrDie(testCtx.ClientSet, "default", t)
 	}
 
+	t.Cleanup(func() {
+		CleanupTest(t, &testCtx)
+	})
+
 	return &testCtx
 }
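With CleanupTest registered through t.Cleanup, callers of InitTestAPIServer no longer need their own teardown defers. A minimal sketch of the new call shape; the test name and namespace prefix are invented, and external packages reach the helper as testutil.InitTestAPIServer, as in the volumescheduling tests further down:

func TestWithAPIServer(t *testing.T) {
	// Namespace creation, node cleanup, and CloseFn are all handled by the
	// t.Cleanup hook that InitTestAPIServer registers; no defer needed here.
	testCtx := InitTestAPIServer(t, "sample", nil)

	// All API calls share the test-scoped context.
	if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).List(testCtx.Ctx, metav1.ListOptions{}); err != nil {
		t.Fatal(err)
	}
}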
@@ -388,6 +418,9 @@ func InitTestSchedulerWithOptions(
 	resyncPeriod time.Duration,
 	opts ...scheduler.Option,
 ) *TestContext {
+	ctx, cancel := context.WithCancel(testCtx.Ctx)
+	testCtx.SchedulerCtx = ctx
+
 	// 1. Create scheduler
 	testCtx.InformerFactory = scheduler.NewInformerFactory(testCtx.ClientSet, resyncPeriod)
 	if testCtx.KubeConfig != nil {
@@ -406,7 +439,7 @@ func InitTestSchedulerWithOptions(
 		testCtx.InformerFactory,
 		testCtx.DynInformerFactory,
 		profile.NewRecorderFactory(eventBroadcaster),
-		testCtx.Ctx.Done(),
+		ctx.Done(),
 		opts...,
 	)
@@ -414,13 +447,19 @@ func InitTestSchedulerWithOptions(
 		t.Fatalf("Couldn't create scheduler: %v", err)
 	}
 
-	eventBroadcaster.StartRecordingToSink(testCtx.Ctx.Done())
+	eventBroadcaster.StartRecordingToSink(ctx.Done())
 
 	oldCloseFn := testCtx.CloseFn
 	testCtx.CloseFn = func() {
 		oldCloseFn()
 		eventBroadcaster.Shutdown()
 	}
+
+	testCtx.SchedulerCloseFn = func() {
+		cancel()
+		eventBroadcaster.Shutdown()
+	}
+
 	return testCtx
 }
@@ -488,8 +527,8 @@ func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.Di
 // configuration.
 func InitTestSchedulerWithNS(t *testing.T, nsPrefix string, opts ...scheduler.Option) *TestContext {
 	testCtx := InitTestSchedulerWithOptions(t, InitTestAPIServer(t, nsPrefix, nil), 0, opts...)
-	SyncInformerFactory(testCtx)
-	go testCtx.Scheduler.Run(testCtx.Ctx)
+	SyncSchedulerInformerFactory(testCtx)
+	go testCtx.Scheduler.Run(testCtx.SchedulerCtx)
 	return testCtx
 }
@@ -512,8 +551,8 @@ func InitTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
 		t, InitTestAPIServer(t, nsPrefix, nil), 0,
 		scheduler.WithProfiles(cfg.Profiles...))
-	SyncInformerFactory(testCtx)
-	go testCtx.Scheduler.Run(testCtx.Ctx)
+	SyncSchedulerInformerFactory(testCtx)
+	go testCtx.Scheduler.Run(testCtx.SchedulerCtx)
 	return testCtx
 }
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index efb2f512081..d91554b7f7e 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -998,10 +998,7 @@ func TestRescheduleProvisioning(t *testing.T) {
 	ns := testCtx.NS.Name
 	defer func() {
-		testCtx.CancelFn()
 		deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-		testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
-		testCtx.CloseFn()
 	}()
 
 	ctrl, informerFactory, err := initPVController(t, testCtx, 0)
@@ -1049,7 +1046,7 @@ func TestRescheduleProvisioning(t *testing.T) {
 
 func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
 	testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
-	testutil.SyncInformerFactory(testCtx)
+	testutil.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 	clientset := testCtx.ClientSet
@@ -1087,7 +1084,6 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
 		teardown: func() {
 			klog.Infof("test cluster %q start to tear down", ns)
 			deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-			testutil.CleanupTest(t, testCtx)
 		},
 	}
 }
diff --git a/test/integration/volumescheduling/volume_capacity_priority_test.go b/test/integration/volumescheduling/volume_capacity_priority_test.go
index 9122b18d203..aeeac39f43e 100644
--- a/test/integration/volumescheduling/volume_capacity_priority_test.go
+++ b/test/integration/volumescheduling/volume_capacity_priority_test.go
@@ -48,7 +48,7 @@ func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {
 func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
 	testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
-	testutil.SyncInformerFactory(testCtx)
+	testutil.SyncSchedulerInformerFactory(testCtx)
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 
 	clientset := testCtx.ClientSet
@@ -71,7 +71,6 @@ func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPe
 		teardown: func() {
 			klog.Infof("test cluster %q start to tear down", ns)
 			deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-			testutil.CleanupTest(t, testCtx)
 		},
 	}
 }
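The net effect of splitting Ctx/CloseFn from SchedulerCtx/SchedulerCloseFn is that a test can stop just the scheduler while the apiserver stays up until the t.Cleanup hook registered by InitTestAPIServer fires. A rough sketch, with the test name and namespace prefix invented and the body elided; external callers would qualify the helpers as testutil.*:

func TestSchedulerLifecycle(t *testing.T) {
	// InitTestSchedulerWithNS starts the apiserver, syncs the informers, and
	// runs the scheduler; apiserver teardown is already registered via t.Cleanup.
	testCtx := InitTestSchedulerWithNS(t, "lifecycle")

	// ... schedule pods using testCtx.ClientSet and testCtx.Ctx ...

	// SchedulerCloseFn cancels SchedulerCtx and shuts down the event
	// broadcaster, leaving the apiserver to be torn down by t.Cleanup.
	testCtx.SchedulerCloseFn()
}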