diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 28aac59bf28..72646863cd3 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -480,11 +480,12 @@ func TestDeleteFinalStateUnknown(t *testing.T) { } func TestExpectationsOnRecreate(t *testing.T) { - client := fake.NewSimpleClientset() - stopCh := make(chan struct{}) - defer close(stopCh) - _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client := fake.NewSimpleClientset() + f := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) dsc, err := NewDaemonSetsController( ctx, @@ -550,8 +551,8 @@ func TestExpectationsOnRecreate(t *testing.T) { t.Fatal(err) } - f.Start(stopCh) - for ty, ok := range f.WaitForCacheSync(stopCh) { + f.Start(ctx.Done()) + for ty, ok := range f.WaitForCacheSync(ctx.Done()) { if !ok { t.Fatalf("caches failed to sync: %v", ty) } diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index 6c4d8e7fd21..a6e46eaa194 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -70,14 +70,18 @@ import ( "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/etcd" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) -func setup(t *testing.T, groupVersions ...schema.GroupVersion) (clientset.Interface, *restclient.Config, framework.TearDownFunc) { +func setup(t *testing.T, groupVersions ...schema.GroupVersion) (context.Context, clientset.Interface, *restclient.Config, framework.TearDownFunc) { return setupWithResources(t, groupVersions, nil) } -func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resources []schema.GroupVersionResource) (clientset.Interface, *restclient.Config, framework.TearDownFunc) { - return framework.StartTestServer(t, framework.TestServerSetup{ +func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resources []schema.GroupVersionResource) (context.Context, clientset.Interface, *restclient.Config, framework.TearDownFunc) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + + client, config, teardown := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { if len(groupVersions) > 0 || len(resources) > 0 { resourceConfig := controlplane.DefaultAPIResourceConfigSource() @@ -87,6 +91,13 @@ func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resou } }, }) + + newTeardown := func() { + cancel() + teardown() + } + + return ctx, client, config, newTeardown } func verifyStatusCode(t *testing.T, transport http.RoundTripper, verb, URL, body string, expectedStatusCode int) { @@ -147,7 +158,7 @@ var cascDel = ` ` func Test4xxStatusCodeInvalidPatch(t *testing.T) { - client, _, tearDownFn := setup(t) + ctx, client, _, tearDownFn := setup(t) defer tearDownFn() obj := []byte(`{ @@ -183,7 +194,7 @@ func Test4xxStatusCodeInvalidPatch(t *testing.T) { AbsPath("/apis/apps/v1"). Namespace("default"). Resource("deployments"). - Body(obj).Do(context.TODO()).Get() + Body(obj).Do(ctx).Get() if err != nil { t.Fatalf("Failed to create object: %v: %v", err, resp) } @@ -192,7 +203,7 @@ func Test4xxStatusCodeInvalidPatch(t *testing.T) { Namespace("default"). Resource("deployments"). Name("deployment"). 
- Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(context.TODO()) + Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(ctx) var statusCode int result.StatusCode(&statusCode) if statusCode != 422 { @@ -203,7 +214,7 @@ func Test4xxStatusCodeInvalidPatch(t *testing.T) { Namespace("default"). Resource("deployments"). Name("deployment"). - Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(context.TODO()) + Body([]byte(`{"metadata":{"annotations":{"foo":["bar"]}}}`)).Do(ctx) result.StatusCode(&statusCode) if statusCode != 422 { t.Fatalf("Expected status code to be 422, got %v (%#v)", statusCode, result) @@ -303,7 +314,7 @@ func TestHSTS(t *testing.T) { // Tests that the apiserver returns 202 status code as expected. func Test202StatusCode(t *testing.T) { - clientSet, kubeConfig, tearDownFn := setup(t) + ctx, clientSet, kubeConfig, tearDownFn := setup(t) defer tearDownFn() transport, err := restclient.TransportFor(kubeConfig) @@ -318,7 +329,7 @@ func Test202StatusCode(t *testing.T) { // 1. Create the resource without any finalizer and then delete it without setting DeleteOptions. // Verify that server returns 200 in this case. - rs, err := rsClient.Create(context.TODO(), newRS(ns.Name), metav1.CreateOptions{}) + rs, err := rsClient.Create(ctx, newRS(ns.Name), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -328,7 +339,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the apiserver still returns 200 since DeleteOptions.OrphanDependents is not set. rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) + rs, err = rsClient.Create(ctx, rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -337,7 +348,7 @@ func Test202StatusCode(t *testing.T) { // 3. Create the resource and then delete it with DeleteOptions.OrphanDependents=false. // Verify that the server still returns 200 since the resource is immediately deleted. rs = newRS(ns.Name) - rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) + rs, err = rsClient.Create(ctx, rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -347,7 +358,7 @@ func Test202StatusCode(t *testing.T) { // Verify that the server returns 202 in this case. rs = newRS(ns.Name) rs.ObjectMeta.Finalizers = []string{"kube.io/dummy-finalizer"} - rs, err = rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}) + rs, err = rsClient.Create(ctx, rs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create rs: %v", err) } @@ -363,12 +374,17 @@ var ( // TestListOptions ensures that list works as expected for valid and invalid combinations of limit, continue, // resourceVersion and resourceVersionMatch. 
func TestListOptions(t *testing.T) { + for _, watchCacheEnabled := range []bool{true, false} { t.Run(fmt.Sprintf("watchCacheEnabled=%t", watchCacheEnabled), func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() var storageTransport *storagebackend.TransportConfig - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.EnableWatchCache = watchCacheEnabled storageTransport = &opts.Etcd.StorageConfig.Transport @@ -604,11 +620,16 @@ func TestListResourceVersion0(t *testing.T) { watchCacheEnabled: false, }, } + for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.EnableWatchCache = tc.watchCacheEnabled }, @@ -623,7 +644,7 @@ func TestListResourceVersion0(t *testing.T) { for i := 0; i < 10; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -631,7 +652,7 @@ func TestListResourceVersion0(t *testing.T) { if tc.watchCacheEnabled { // poll until the watch cache has the full list in memory err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { - list, err := clientSet.AppsV1().ReplicaSets(ns.Name).List(context.Background(), metav1.ListOptions{ResourceVersion: "0"}) + list, err := clientSet.AppsV1().ReplicaSets(ns.Name).List(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { return false, err } @@ -643,12 +664,12 @@ func TestListResourceVersion0(t *testing.T) { } pagerFn := func(opts metav1.ListOptions) (runtime.Object, error) { - return rsClient.List(context.TODO(), opts) + return rsClient.List(ctx, opts) } p := pager.New(pager.SimplePageFunc(pagerFn)) p.PageSize = 3 - listObj, _, err := p.List(context.Background(), metav1.ListOptions{ResourceVersion: "0"}) + listObj, _, err := p.List(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { t.Fatalf("Unexpected list error: %v", err) } @@ -665,7 +686,7 @@ func TestListResourceVersion0(t *testing.T) { func TestAPIListChunking(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientSet, "list-paging", t) @@ -676,7 +697,7 @@ func TestAPIListChunking(t *testing.T) { for i := 0; i < 4; i++ { rs := newRS(ns.Name) rs.Name = fmt.Sprintf("test-%d", i) - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -687,7 +708,7 @@ 
func TestAPIListChunking(t *testing.T) { PageSize: 1, PageFn: pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { calls++ - list, err := rsClient.List(context.TODO(), opts) + list, err := rsClient.List(ctx, opts) if err != nil { return nil, err } @@ -697,14 +718,14 @@ func TestAPIListChunking(t *testing.T) { if calls == 2 { rs := newRS(ns.Name) rs.Name = "test-5" - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } return list, err }), } - listObj, _, err := p.List(context.Background(), metav1.ListOptions{}) + listObj, _, err := p.List(ctx, metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -733,7 +754,7 @@ func TestAPIListChunking(t *testing.T) { func TestAPIListChunkingWithLabelSelector(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APIListChunking, true)() - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientSet, "list-paging-with-label-selector", t) @@ -746,7 +767,7 @@ func TestAPIListChunkingWithLabelSelector(t *testing.T) { rs.Name = fmt.Sprintf("test-%d", i) odd := i%2 != 0 rs.Labels = map[string]string{"odd-index": strconv.FormatBool(odd)} - if _, err := rsClient.Create(context.TODO(), rs, metav1.CreateOptions{}); err != nil { + if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -757,7 +778,7 @@ func TestAPIListChunkingWithLabelSelector(t *testing.T) { PageSize: 1, PageFn: pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { calls++ - list, err := rsClient.List(context.TODO(), opts) + list, err := rsClient.List(ctx, opts) if err != nil { return nil, err } @@ -767,7 +788,7 @@ func TestAPIListChunkingWithLabelSelector(t *testing.T) { return list, err }), } - listObj, _, err := p.List(context.Background(), metav1.ListOptions{LabelSelector: "odd-index=true", Limit: 3}) + listObj, _, err := p.List(ctx, metav1.ListOptions{LabelSelector: "odd-index=true", Limit: 3}) if err != nil { t.Fatal(err) } @@ -806,7 +827,7 @@ func makeSecret(name string) *v1.Secret { } func TestNameInFieldSelector(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() numNamespaces := 3 @@ -814,11 +835,11 @@ func TestNameInFieldSelector(t *testing.T) { ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("ns%d", i), t) defer framework.DeleteNamespaceOrDie(clientSet, ns, t) - _, err := clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("foo"), metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets(ns.Name).Create(ctx, makeSecret("foo"), metav1.CreateOptions{}) if err != nil { t.Errorf("Couldn't create secret: %v", err) } - _, err = clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), makeSecret("bar"), metav1.CreateOptions{}) + _, err = clientSet.CoreV1().Secrets(ns.Name).Create(ctx, makeSecret("bar"), metav1.CreateOptions{}) if err != nil { t.Errorf("Couldn't create secret: %v", err) } @@ -865,7 +886,7 @@ func TestNameInFieldSelector(t *testing.T) { opts := metav1.ListOptions{ FieldSelector: tc.selector, } - secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(context.TODO(), opts) + secrets, err := clientSet.CoreV1().Secrets(tc.namespace).List(ctx, opts) if err != nil { t.Errorf("%s: Unexpected error: %v", tc.selector, err) } 
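[Note, not part of the patch] The same conversion recurs throughout apiserver_test.go: setup() now derives a cancellable context from ktesting, hands it to framework.StartTestServer, and folds cancel() into the teardown it returns, so each call site swaps context.TODO() for that ctx. A condensed sketch of the pattern, using only the signatures visible in the hunks above (the helper name setupSketch is illustrative):

// Sketch only: mirrors the new setupWithResources shape shown above.
func setupSketch(t *testing.T) (context.Context, clientset.Interface, *restclient.Config, framework.TearDownFunc) {
	_, ctx := ktesting.NewTestContext(t)   // test-scoped context, logs via t
	ctx, cancel := context.WithCancel(ctx) // lets teardown stop in-flight work

	client, config, teardown := framework.StartTestServer(ctx, t, framework.TestServerSetup{})

	return ctx, client, config, func() {
		cancel()   // cancel the context before stopping the server
		teardown()
	}
}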
@@ -897,7 +918,7 @@ func TestMetadataClient(t *testing.T) { } defer tearDown() - clientset, kubeConfig, tearDownFn := setup(t) + ctx, clientset, kubeConfig, tearDownFn := setup(t) defer tearDownFn() apiExtensionClient, err := apiextensionsclient.NewForConfig(config) @@ -951,7 +972,7 @@ func TestMetadataClient(t *testing.T) { namespace := framework.CreateNamespaceOrDie(clientset, ns, t) defer framework.DeleteNamespaceOrDie(clientset, namespace, t) - svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(ns).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } @@ -964,7 +985,7 @@ func TestMetadataClient(t *testing.T) { }) client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services")) - items, err := client.Namespace(ns).List(context.TODO(), metav1.ListOptions{}) + items, err := client.Namespace(ns).List(ctx, metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -983,7 +1004,7 @@ func TestMetadataClient(t *testing.T) { } wrapper.resp = nil - item, err := client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}) + item, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -994,7 +1015,7 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected response: %#v", wrapper.resp) } - item, err = client.Namespace(ns).Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) + item, err = client.Namespace(ns).Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) if err != nil { t.Fatal(err) } @@ -1002,11 +1023,11 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected object: %#v", item) } - if err := client.Namespace(ns).Delete(context.TODO(), "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { + if err := client.Namespace(ns).Delete(ctx, "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { t.Fatal(err) } - if _, err := client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + if _, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { t.Fatal(err) } }, @@ -1016,7 +1037,7 @@ func TestMetadataClient(t *testing.T) { want: func(t *testing.T) { ns := "metadata-crd" crclient := dynamicClient.Resource(crdGVR).Namespace(ns) - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{ + cr, err := crclient.Create(ctx, &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "cr.bar.com/v1", "kind": "Foo", @@ -1041,7 +1062,7 @@ func TestMetadataClient(t *testing.T) { }) client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR) - items, err := client.Namespace(ns).List(context.TODO(), metav1.ListOptions{}) + items, err := client.Namespace(ns).List(ctx, metav1.ListOptions{}) if err != nil { t.Fatal(err) } @@ -1060,7 +1081,7 @@ func TestMetadataClient(t *testing.T) { } wrapper.resp = nil - item, err := 
client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}) + item, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -1071,7 +1092,7 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected response: %#v", wrapper.resp) } - item, err = client.Namespace(ns).Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) + item, err = client.Namespace(ns).Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"foo":"baz"}}}`), metav1.PatchOptions{}) if err != nil { t.Fatal(err) } @@ -1079,10 +1100,10 @@ func TestMetadataClient(t *testing.T) { t.Fatalf("unexpected object: %#v", item) } - if err := client.Namespace(ns).Delete(context.TODO(), "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { + if err := client.Namespace(ns).Delete(ctx, "test-1", metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &item.UID}}); err != nil { t.Fatal(err) } - if _, err := client.Namespace(ns).Get(context.TODO(), "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + if _, err := client.Namespace(ns).Get(ctx, "test-1", metav1.GetOptions{}); !apierrors.IsNotFound(err) { t.Fatal(err) } }, @@ -1094,11 +1115,11 @@ func TestMetadataClient(t *testing.T) { namespace := framework.CreateNamespaceOrDie(clientset, ns, t) defer framework.DeleteNamespaceOrDie(clientset, namespace, t) - svc, err := clientset.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(ns).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"foo": "bar"}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(ns).Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(ns).Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } @@ -1110,7 +1131,7 @@ func TestMetadataClient(t *testing.T) { }) client := metadata.NewForConfigOrDie(cfg).Resource(v1.SchemeGroupVersion.WithResource("services")) - w, err := client.Namespace(ns).Watch(context.TODO(), metav1.ListOptions{ResourceVersion: svc.ResourceVersion, Watch: true}) + w, err := client.Namespace(ns).Watch(ctx, metav1.ListOptions{ResourceVersion: svc.ResourceVersion, Watch: true}) if err != nil { t.Fatal(err) } @@ -1147,7 +1168,7 @@ func TestMetadataClient(t *testing.T) { want: func(t *testing.T) { ns := "metadata-watch-crd" crclient := dynamicClient.Resource(crdGVR).Namespace(ns) - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{ + cr, err := crclient.Create(ctx, &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "cr.bar.com/v1", "kind": "Foo", @@ -1167,7 +1188,7 @@ func TestMetadataClient(t *testing.T) { cfg := metadata.ConfigFor(config) client := metadata.NewForConfigOrDie(cfg).Resource(crdGVR) - patched, err := client.Namespace(ns).Patch(context.TODO(), "test-2", types.MergePatchType, 
[]byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}) + patched, err := client.Namespace(ns).Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}) if err != nil { t.Fatal(err) } @@ -1182,7 +1203,7 @@ func TestMetadataClient(t *testing.T) { }) client = metadata.NewForConfigOrDie(cfg).Resource(crdGVR) - w, err := client.Namespace(ns).Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cr.GetResourceVersion(), Watch: true}) + w, err := client.Namespace(ns).Watch(ctx, metav1.ListOptions{ResourceVersion: cr.GetResourceVersion(), Watch: true}) if err != nil { t.Fatal(err) } @@ -1231,7 +1252,7 @@ func TestAPICRDProtobuf(t *testing.T) { } defer tearDown() - _, kubeConfig, tearDownFn := setup(t) + ctx, _, kubeConfig, tearDownFn := setup(t) defer tearDownFn() apiExtensionClient, err := apiextensionsclient.NewForConfig(config) @@ -1285,11 +1306,11 @@ func TestAPICRDProtobuf(t *testing.T) { name: "server returns 406 when asking for protobuf for CRDs, which dynamic client does not support", accept: "application/vnd.kubernetes.protobuf", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1314,11 +1335,11 @@ func TestAPICRDProtobuf(t *testing.T) { name: "server returns JSON when asking for protobuf and json for CRDs", accept: "application/vnd.kubernetes.protobuf,application/json", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1343,11 +1364,11 @@ func TestAPICRDProtobuf(t *testing.T) { accept: "application/vnd.kubernetes.protobuf", subresource: "status", object: func(t *testing.T) (metav1.Object, string, string) { - cr, 
err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"3"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"3"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1373,11 +1394,11 @@ func TestAPICRDProtobuf(t *testing.T) { accept: "application/vnd.kubernetes.protobuf,application/json", subresource: "status", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-4"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "spec": map[string]interface{}{"field": 1}, "metadata": map[string]interface{}{"name": "test-4"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"4"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"4"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1420,7 +1441,7 @@ func TestAPICRDProtobuf(t *testing.T) { w, err := client.Get(). Resource(resource).NamespaceIfScoped(obj.GetNamespace(), len(obj.GetNamespace()) > 0).Name(obj.GetName()).SubResource(tc.subresource). SetHeader("Accept", tc.accept). 
- Stream(context.TODO()) + Stream(ctx) if (tc.wantErr != nil) != (err != nil) { t.Fatalf("unexpected error: %v", err) } @@ -1445,7 +1466,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { } defer tearDown() - clientset, kubeConfig, tearDownFn := setup(t) + ctx, clientset, kubeConfig, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientset, testNamespace, t) @@ -1529,7 +1550,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { name: "v1 verify status subresource returns a table for CRDs", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := subresourcesCrclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-1"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) + cr, err := subresourcesCrclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-1"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } @@ -1541,7 +1562,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { name: "v1 verify scale subresource returns a table for CRDs", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := subresourcesCrclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-2"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) + cr, err := subresourcesCrclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "FooSub", "metadata": map[string]interface{}{"name": "test-2"}, "spec": map[string]interface{}{"replicas": 2}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } @@ -1576,7 +1597,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { }, }, } - rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(ctx, rc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create replicationcontroller: %v", err) } @@ -1611,7 +1632,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { }, }, } - rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(context.TODO(), rc, metav1.CreateOptions{}) + rc, err := clientset.CoreV1().ReplicationControllers(testNamespace).Create(ctx, rc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create replicationcontroller: %v", err) } @@ -1645,7 +1666,7 @@ func TestGetSubresourcesAsTables(t *testing.T) { SetHeader("Accept", tc.accept). Name(obj.GetName()). SubResource(tc.subresource). 
- Do(context.TODO()) + Do(ctx) resObj, err := res.Get() if err != nil { @@ -1667,7 +1688,7 @@ func TestTransform(t *testing.T) { } defer tearDown() - clientset, kubeConfig, tearDownFn := setup(t) + ctx, clientset, kubeConfig, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientset, testNamespace, t) @@ -1711,7 +1732,7 @@ func TestTransform(t *testing.T) { crdGVR := schema.GroupVersionResource{Group: fooCRD.Spec.Group, Version: fooCRD.Spec.Versions[0].Name, Resource: "foos"} crclient := dynamicClient.Resource(crdGVR).Namespace(testNamespace) - previousList, err := crclient.List(context.TODO(), metav1.ListOptions{}) + previousList, err := crclient.List(ctx, metav1.ListOptions{}) if err != nil { t.Fatalf("failed to list CRs before test: %v", err) } @@ -1739,11 +1760,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on CRDs in json", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-1"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-1", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1756,11 +1777,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on CRDs in json;stream=watch", accept: "application/json;stream=watch;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-2"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-2", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1773,11 +1794,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on CRDs in yaml", accept: "application/yaml;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) + cr, err := 
crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-3"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-3", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1796,11 +1817,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-1"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -1814,11 +1835,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-2"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1832,11 +1853,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1beta1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + obj, err := 
clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-3"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -1856,11 +1877,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-1", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1873,11 +1894,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-2", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -1890,11 +1911,11 @@ func TestTransform(t *testing.T) { name: "v1beta1 verify partial metadata object on CRDs in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: 
map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-4", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-4", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-4", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -1995,11 +2016,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on CRDs in json", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-5"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-5"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-5", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-5", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2012,11 +2033,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on CRDs in json;stream=watch", accept: "application/json;stream=watch;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-6"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-6"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-6", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-6", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2029,11 +2050,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on CRDs in yaml", accept: "application/yaml;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": 
"Foo", "metadata": map[string]interface{}{"name": "test-7"}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-7"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), "test-7", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, "test-7", types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2052,11 +2073,11 @@ func TestTransform(t *testing.T) { name: "v1 verify columns on services", accept: "application/json;as=Table;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - svc, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + svc, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-5"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create service: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, svc.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update service: %v", err) } return svc, "", "services" @@ -2070,11 +2091,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeNone, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-6"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -2088,11 +2109,11 @@ func TestTransform(t *testing.T) { accept: "application/json;as=Table;g=meta.k8s.io;v=v1", includeObject: metav1.IncludeObject, object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().Services(testNamespace).Create(context.TODO(), &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, 
metav1.CreateOptions{}) + obj, err := clientset.CoreV1().Services(testNamespace).Create(ctx, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-7"}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1000}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().Services(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().Services(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "services" @@ -2112,11 +2133,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps", accept: "application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-3", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -2129,11 +2150,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on config maps in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) + obj, err := clientset.CoreV1().ConfigMaps(testNamespace).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-4", Annotations: map[string]string{"test": "0"}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create object: %v", err) } - if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(context.TODO(), obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := clientset.CoreV1().ConfigMaps(testNamespace).Patch(ctx, obj.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to update object: %v", err) } return obj, "", "configmaps" @@ -2146,11 +2167,11 @@ func TestTransform(t *testing.T) { name: "v1 verify partial metadata object on CRDs in protobuf", accept: "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1", object: func(t *testing.T) (metav1.Object, string, string) { - cr, err := crclient.Create(context.TODO(), &unstructured.Unstructured{Object: 
map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-8", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) + cr, err := crclient.Create(ctx, &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": "cr.bar.com/v1", "kind": "Foo", "metadata": map[string]interface{}{"name": "test-8", "annotations": map[string]string{"test": "0"}}}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unable to create cr: %v", err) } - if _, err := crclient.Patch(context.TODO(), cr.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { + if _, err := crclient.Patch(ctx, cr.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"test":"1"}}}`), metav1.PatchOptions{}); err != nil { t.Fatalf("unable to patch cr: %v", err) } return cr, crdGVR.Group, "foos" @@ -2266,9 +2287,9 @@ func TestTransform(t *testing.T) { rv = previousRV } - ctx, cancel := context.WithTimeout(context.Background(), wait.ForeverTestTimeout) + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, wait.ForeverTestTimeout) t.Cleanup(func() { - cancel() + timeoutCancel() }) w, err := client.Get(). Resource(resource).NamespaceIfScoped(obj.GetNamespace(), len(obj.GetNamespace()) > 0). @@ -2279,7 +2300,7 @@ func TestTransform(t *testing.T) { FieldSelector: fields.OneTermEqualSelector("metadata.name", obj.GetName()).String(), }, metav1.ParameterCodec). Param("includeObject", string(tc.includeObject)). - Stream(ctx) + Stream(timeoutCtx) if (tc.wantErr != nil) != (err != nil) { t.Fatalf("unexpected error: %v", err) } diff --git a/test/integration/apiserver/certreload/certreload_test.go b/test/integration/apiserver/certreload/certreload_test.go index 7559c4dfab0..d1bf7189da5 100644 --- a/test/integration/apiserver/certreload/certreload_test.go +++ b/test/integration/apiserver/certreload/certreload_test.go @@ -43,6 +43,7 @@ import ( "k8s.io/component-base/cli/flag" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) type caWithClient struct { @@ -135,6 +136,10 @@ func TestClientCARecreate(t *testing.T) { } func testClientCA(t *testing.T, recreate bool) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + frontProxyCA, err := newTestCAWithClient( pkix.Name{ CommonName: "test-front-proxy-ca", @@ -170,7 +175,7 @@ func testClientCA(t *testing.T, recreate bool) { clientCAFilename := "" frontProxyCAFilename := "" - kubeClient, kubeconfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 clientCAFilename = opts.Authentication.ClientCert.ClientCA @@ -300,7 +305,7 @@ func testClientCA(t *testing.T, recreate bool) { } // Call an endpoint to make sure we are authenticated - _, err = testClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + _, err = testClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { t.Error(err) } @@ -468,9 +473,13 @@ func TestServingCertRecreate(t *testing.T) { } func testServingCert(t *testing.T, recreate bool) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var servingCertPath string - _, 
kubeconfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 servingCertPath = opts.SecureServing.ServerCert.CertDirectory @@ -509,7 +518,11 @@ func testServingCert(t *testing.T, recreate bool) { func TestSNICert(t *testing.T) { var servingCertPath string - _, kubeconfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + _, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 servingCertPath = opts.SecureServing.ServerCert.CertDirectory diff --git a/test/integration/apiserver/export_test.go b/test/integration/apiserver/export_test.go index a2ecbea8d2f..1843bf363ec 100644 --- a/test/integration/apiserver/export_test.go +++ b/test/integration/apiserver/export_test.go @@ -17,7 +17,6 @@ limitations under the License. package apiserver import ( - "context" "net/http" "testing" @@ -27,31 +26,30 @@ import ( // Tests that the apiserver rejects the export param func TestExportRejection(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() - _, err := clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ + _, err := clientSet.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "export-fail"}, }, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } defer func() { - clientSet.CoreV1().Namespaces().Delete(context.Background(), "export-fail", metav1.DeleteOptions{}) + clientSet.CoreV1().Namespaces().Delete(ctx, "export-fail", metav1.DeleteOptions{}) }() - result := clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "true").Do(context.Background()) + result := clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "true").Do(ctx) statusCode := 0 result.StatusCode(&statusCode) if statusCode != http.StatusBadRequest { t.Errorf("expected %v, got %v", http.StatusBadRequest, statusCode) } - result = clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "false").Do(context.Background()) + result = clientSet.Discovery().RESTClient().Get().AbsPath("/api/v1/namespaces/export-fail").Param("export", "false").Do(ctx) statusCode = 0 result.StatusCode(&statusCode) if statusCode != http.StatusOK { t.Errorf("expected %v, got %v", http.StatusOK, statusCode) } - } diff --git a/test/integration/apiserver/flowcontrol/concurrency_test.go b/test/integration/apiserver/flowcontrol/concurrency_test.go index a090c7528f5..f3bbc55cdef 100644 --- a/test/integration/apiserver/flowcontrol/concurrency_test.go +++ b/test/integration/apiserver/flowcontrol/concurrency_test.go @@ -38,6 +38,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -48,8 +49,11 @@ const ( timeout = time.Second * 10 ) -func setup(t testing.TB, maxReadonlyRequestsInFlight, MaxMutatingRequestsInFlight int) (*rest.Config, 
framework.TearDownFunc) { - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ +func setup(t testing.TB, maxReadonlyRequestsInFlight, MaxMutatingRequestsInFlight int) (context.Context, *rest.Config, framework.TearDownFunc) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure all clients are allowed to send requests. opts.Authorization.Modes = []string{"AlwaysAllow"} @@ -57,13 +61,18 @@ func setup(t testing.TB, maxReadonlyRequestsInFlight, MaxMutatingRequestsInFligh opts.GenericServerRunOptions.MaxMutatingRequestsInFlight = MaxMutatingRequestsInFlight }, }) - return kubeConfig, tearDownFn + + newTeardown := func() { + cancel() + tearDownFn() + } + return ctx, kubeConfig, newTeardown } func TestPriorityLevelIsolation(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() // NOTE: disabling the feature should fail the test - kubeConfig, closeFn := setup(t, 1, 1) + ctx, kubeConfig, closeFn := setup(t, 1, 1) defer closeFn() loopbackClient := clientset.NewForConfigOrDie(kubeConfig) @@ -106,7 +115,7 @@ func TestPriorityLevelIsolation(t *testing.T) { // "elephant" wg.Add(concurrencyShares + queueLength) streamRequests(concurrencyShares+queueLength, func() { - _, err := noxu1Client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) + _, err := noxu1Client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { t.Error(err) } @@ -114,7 +123,7 @@ func TestPriorityLevelIsolation(t *testing.T) { // "mouse" wg.Add(3) streamRequests(3, func() { - _, err := noxu2Client.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) + _, err := noxu2Client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { t.Error(err) } diff --git a/test/integration/apiserver/flowcontrol/concurrency_util_test.go b/test/integration/apiserver/flowcontrol/concurrency_util_test.go index e5b90bae99f..ba5c88f31bf 100644 --- a/test/integration/apiserver/flowcontrol/concurrency_util_test.go +++ b/test/integration/apiserver/flowcontrol/concurrency_util_test.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -147,10 +148,14 @@ func (d *noxuDelayingAuthorizer) Authorize(ctx context.Context, a authorizer.Att // Secondarily, this test also checks the observed seat utilizations against values derived from expecting that // the throughput observed by the client equals the execution throughput observed by the server. func TestConcurrencyIsolation(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() // NOTE: disabling the feature should fail the test - _, kubeConfig, closeFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, closeFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure all clients are allowed to send requests. 
opts.Authorization.Modes = []string{"AlwaysAllow"} @@ -191,7 +196,7 @@ func TestConcurrencyIsolation(t *testing.T) { wg.Add(noxu1NumGoroutines) streamRequests(noxu1NumGoroutines, func() { start := time.Now() - _, err := noxu1Client.CoreV1().Namespaces().Get(context.Background(), "default", metav1.GetOptions{}) + _, err := noxu1Client.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{}) duration := time.Since(start).Seconds() noxu1LatMeasure.update(duration) if err != nil { @@ -204,7 +209,7 @@ func TestConcurrencyIsolation(t *testing.T) { wg.Add(noxu2NumGoroutines) streamRequests(noxu2NumGoroutines, func() { start := time.Now() - _, err := noxu2Client.CoreV1().Namespaces().Get(context.Background(), "default", metav1.GetOptions{}) + _, err := noxu2Client.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{}) duration := time.Since(start).Seconds() noxu2LatMeasure.update(duration) if err != nil { diff --git a/test/integration/apiserver/flowcontrol/fight_test.go b/test/integration/apiserver/flowcontrol/fight_test.go index 2279d597c52..e80be152fb2 100644 --- a/test/integration/apiserver/flowcontrol/fight_test.go +++ b/test/integration/apiserver/flowcontrol/fight_test.go @@ -172,7 +172,7 @@ func (ft *fightTest) evaluate(tBeforeCreate, tAfterCreate time.Time) { } func TestConfigConsumerFight(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() - kubeConfig, closeFn := setup(t, 100, 100) + _, kubeConfig, closeFn := setup(t, 100, 100) defer closeFn() const teamSize = 3 ft := newFightTest(t, kubeConfig, teamSize) diff --git a/test/integration/apiserver/flowcontrol/fs_condition_test.go b/test/integration/apiserver/flowcontrol/fs_condition_test.go index 823466be067..e91c382769d 100644 --- a/test/integration/apiserver/flowcontrol/fs_condition_test.go +++ b/test/integration/apiserver/flowcontrol/fs_condition_test.go @@ -17,7 +17,6 @@ limitations under the License. package flowcontrol import ( - "context" "encoding/json" "testing" "time" @@ -38,15 +37,11 @@ import ( func TestConditionIsolation(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.APIPriorityAndFairness, true)() // NOTE: disabling the feature should fail the test - kubeConfig, closeFn := setup(t, 10, 10) + ctx, kubeConfig, closeFn := setup(t, 10, 10) defer closeFn() loopbackClient := clientset.NewForConfigOrDie(kubeConfig) - stopCh := make(chan struct{}) - defer close(stopCh) - ctx := context.Background() - fsOrig := fcboot.SuggestedFlowSchemas[0] t.Logf("Testing Status Condition isolation in FlowSchema %q", fsOrig.Name) fsClient := loopbackClient.FlowcontrolV1beta3().FlowSchemas() @@ -60,7 +55,7 @@ func TestConditionIsolation(t *testing.T) { } dangleOrig = getCondition(fsGot.Status.Conditions, flowcontrol.FlowSchemaConditionDangling) return dangleOrig != nil, nil - }, stopCh) + }, ctx.Done()) ssaType := flowcontrol.FlowSchemaConditionType("test-ssa") patchSSA := flowcontrolapply.FlowSchema(fsOrig.Name). 
diff --git a/test/integration/apiserver/max_json_patch_operations_test.go b/test/integration/apiserver/max_json_patch_operations_test.go index 357d7092955..c1c455851da 100644 --- a/test/integration/apiserver/max_json_patch_operations_test.go +++ b/test/integration/apiserver/max_json_patch_operations_test.go @@ -28,11 +28,16 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // Tests that the apiserver limits the number of operations in a json patch. func TestMaxJSONPatchOperations(t *testing.T) { - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 }, @@ -50,13 +55,13 @@ func TestMaxJSONPatchOperations(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets("default").Create(ctx, secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } err = c.Patch(types.JSONPatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")). - Body(hugePatch).Do(context.TODO()).Error() + Body(hugePatch).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } diff --git a/test/integration/apiserver/max_request_body_bytes_test.go b/test/integration/apiserver/max_request_body_bytes_test.go index 9b493058b12..6f10c1ed8f1 100644 --- a/test/integration/apiserver/max_request_body_bytes_test.go +++ b/test/integration/apiserver/max_request_body_bytes_test.go @@ -26,11 +26,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // Tests that the apiserver limits the resource size in write operations. func TestMaxResourceSize(t *testing.T) { - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() hugeData := []byte(strings.Repeat("x", 3*1024*1024+1)) @@ -40,7 +45,7 @@ func TestMaxResourceSize(t *testing.T) { c := clientSet.CoreV1().RESTClient() t.Run("Create should limit the request body size", func(t *testing.T) { err := c.Post().AbsPath("/api/v1/namespaces/default/pods"). - Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -56,14 +61,14 @@ func TestMaxResourceSize(t *testing.T) { Name: "test", }, } - _, err := clientSet.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets("default").Create(ctx, secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } t.Run("Update should limit the request body size", func(t *testing.T) { err = c.Put().AbsPath("/api/v1/namespaces/default/secrets/test"). 
- Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -74,7 +79,7 @@ func TestMaxResourceSize(t *testing.T) { }) t.Run("Patch should limit the request body size", func(t *testing.T) { err = c.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -89,7 +94,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`[{"op":"add","path":"/foo","value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}]`) err = rest.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } @@ -100,7 +105,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`[{"op":"add","path":"/foo","value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}]`) err = rest.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } @@ -111,7 +116,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.MergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } @@ -122,7 +127,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`) err = rest.Patch(types.MergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } @@ -133,7 +138,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.StrategicMergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %v", err) } @@ -144,7 +149,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`) err = rest.Patch(types.StrategicMergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } @@ -155,7 +160,7 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`) err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath("/api/v1/namespaces/default/secrets/test"). 
- Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected success or bad request err, got %#v", err) } @@ -166,14 +171,14 @@ func TestMaxResourceSize(t *testing.T) { } patchBody := []byte(`{"apiVersion":"v1","kind":"Secret"` + strings.Repeat(" ", 3*1024*1024-100) + `}`) err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(patchBody).Do(context.TODO()).Error() + Body(patchBody).Do(ctx).Error() if err != nil { t.Errorf("unexpected error: %v", err) } }) t.Run("Delete should limit the request body size", func(t *testing.T) { err = c.Delete().AbsPath("/api/v1/namespaces/default/secrets/test"). - Body(hugeData).Do(context.TODO()).Error() + Body(hugeData).Do(ctx).Error() if err == nil { t.Fatalf("unexpected no error") } @@ -197,7 +202,7 @@ values: ` + strings.Repeat("[", 3*1024*1024)) SetHeader("Content-Type", "application/yaml"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected too large error, got %v", err) } @@ -220,7 +225,7 @@ values: ` + strings.Repeat("[", 3*1024*1024/2-500) + strings.Repeat("]", 3*1024* SetHeader("Content-Type", "application/yaml"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) } @@ -243,7 +248,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/yaml"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(yamlBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) } @@ -264,7 +269,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/json"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(jsonBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsRequestEntityTooLargeError(err) { t.Errorf("expected too large error, got %v", err) } @@ -288,7 +293,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/json"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(jsonBody). - DoRaw(context.TODO()) + DoRaw(ctx) // TODO(liggitt): expect bad request on deep nesting, rather than success on dropped unknown field data if err != nil && !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) @@ -313,7 +318,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000)) SetHeader("Content-Type", "application/json"). AbsPath("/api/v1/namespaces/default/configmaps"). Body(jsonBody). - DoRaw(context.TODO()) + DoRaw(ctx) if !apierrors.IsBadRequest(err) { t.Errorf("expected bad request, got %v", err) } diff --git a/test/integration/apiserver/no_new_betas_test.go b/test/integration/apiserver/no_new_betas_test.go index fc7fc9706c9..20e21c2e5a1 100644 --- a/test/integration/apiserver/no_new_betas_test.go +++ b/test/integration/apiserver/no_new_betas_test.go @@ -57,7 +57,7 @@ func TestNoNewBetaAPIsByDefault(t *testing.T) { // if you found this because you want to create an integration test for your new beta API, the method you're looking for // is this setupWithResources method and you need to pass the resource you want to enable into it. 
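
The comment above points authors of new beta APIs at setupWithResources. As a purely illustrative sketch (not part of this change, assuming the same package and imports as the surrounding file, and using an invented example.k8s.io group and "widgets" resource), a test opting a beta resource in through the four-value setupWithResources used in this patch might look like:

// TestExampleBetaWidgets is hypothetical; the group/version/resource below do not exist
// and are only meant to show the call shape of the new setupWithResources signature.
func TestExampleBetaWidgets(t *testing.T) {
	ctx, client, _, tearDownFn := setupWithResources(t,
		[]schema.GroupVersion{{Group: "example.k8s.io", Version: "v1beta1"}},
		[]schema.GroupVersionResource{{Group: "example.k8s.io", Version: "v1beta1", Resource: "widgets"}},
	)
	defer tearDownFn()

	// The returned context is cancelled by tearDownFn, so it can drive every client call.
	if _, err := client.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{}); err != nil {
		t.Fatal(err)
	}
}
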
- kubeClient, _, tearDownFn := setupWithResources(t, + _, kubeClient, _, tearDownFn := setupWithResources(t, []schema.GroupVersion{}, []schema.GroupVersionResource{}, ) diff --git a/test/integration/apiserver/openapi/openapi_enum_test.go b/test/integration/apiserver/openapi/openapi_enum_test.go index a0a1050ab02..1931f801b20 100644 --- a/test/integration/apiserver/openapi/openapi_enum_test.go +++ b/test/integration/apiserver/openapi/openapi_enum_test.go @@ -17,6 +17,7 @@ limitations under the License. package openapi import ( + "context" "encoding/json" "net/http" "testing" @@ -31,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" generated "k8s.io/kubernetes/pkg/generated/openapi" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestEnablingOpenAPIEnumTypes(t *testing.T) { @@ -54,6 +56,10 @@ func TestEnablingOpenAPIEnumTypes(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.OpenAPIEnums, tc.featureEnabled)() getDefinitionsFn := openapi.GetOpenAPIDefinitionsWithoutDisabledFeatures(func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { @@ -73,7 +79,7 @@ func TestEnablingOpenAPIEnumTypes(t *testing.T) { return defs }) - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { config.GenericConfig.OpenAPIConfig = framework.DefaultOpenAPIConfig() config.GenericConfig.OpenAPIConfig.GetDefinitions = getDefinitionsFn diff --git a/test/integration/apiserver/openapi/openapiv3_test.go b/test/integration/apiserver/openapi/openapiv3_test.go index 6683e720dbb..1f0369e48d9 100644 --- a/test/integration/apiserver/openapi/openapiv3_test.go +++ b/test/integration/apiserver/openapi/openapiv3_test.go @@ -41,13 +41,18 @@ import ( "k8s.io/kube-openapi/pkg/spec3" apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" "sigs.k8s.io/yaml" ) func TestOpenAPIV3SpecRoundTrip(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.OpenAPIV3, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() paths := []string{ @@ -189,9 +194,14 @@ func TestOpenAPIV3ProtoRoundtrip(t *testing.T) { // The OpenAPI V3 proto library strips fields that are sibling elements to $ref // See https://github.com/kubernetes/kubernetes/issues/106387 for more details t.Skip("Skipping OpenAPI V3 Proto roundtrip test") + + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.OpenAPIV3, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() rt, err := restclient.TransportFor(kubeConfig) diff --git 
a/test/integration/apiserver/patch_test.go b/test/integration/apiserver/patch_test.go index db4077e66aa..a4a8f64db7b 100644 --- a/test/integration/apiserver/patch_test.go +++ b/test/integration/apiserver/patch_test.go @@ -17,7 +17,6 @@ limitations under the License. package apiserver import ( - "context" "fmt" "sync" "sync/atomic" @@ -37,7 +36,7 @@ import ( // Tests that the apiserver retries patches func TestPatchConflicts(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() ns := framework.CreateNamespaceOrDie(clientSet, "status-code", t) @@ -66,7 +65,7 @@ func TestPatchConflicts(t *testing.T) { } // Create the object we're going to conflict on - _, err := clientSet.CoreV1().Secrets(ns.Name).Create(context.TODO(), secret, metav1.CreateOptions{}) + _, err := clientSet.CoreV1().Secrets(ns.Name).Create(ctx, secret, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -89,7 +88,7 @@ func TestPatchConflicts(t *testing.T) { Resource("secrets"). Name("test"). Body([]byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}, "ownerReferences":[{"$patch":"delete","uid":"%s"}]}}`, labelName, value, UIDs[i]))). - Do(context.TODO()). + Do(ctx). Get() if apierrors.IsConflict(err) { @@ -143,7 +142,7 @@ func findOwnerRefByUID(ownerRefs []metav1.OwnerReference, uid types.UID) bool { // with an empty slice is handled property // https://github.com/kubernetes/kubernetes/issues/117470 func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { - clientSet, _, tearDownFn := setup(t) + ctx, clientSet, _, tearDownFn := setup(t) defer tearDownFn() url := "https://foo.com" @@ -153,7 +152,7 @@ func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { AdmissionregistrationV1(). ValidatingWebhookConfigurations(). Create( - context.TODO(), + ctx, &admissionregistrationv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: "base-validation", @@ -178,7 +177,7 @@ func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { AdmissionregistrationV1(). ValidatingWebhookConfigurations(). Patch( - context.TODO(), + ctx, "base-validation", types.StrategicMergePatchType, []byte(` @@ -198,7 +197,7 @@ func TestNestedStrategicMergePatchWithEmpty(t *testing.T) { AdmissionregistrationV1(). ValidatingWebhookConfigurations(). 
Patch( - context.TODO(), + ctx, "base-validation", types.StrategicMergePatchType, []byte(`{"$setElementOrder/webhooks":[{"name":"new.foo.com"}],"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"admissionregistration.k8s.io/v1\",\"kind\":\"ValidatingWebhookConfiguration\",\"metadata\":{\"annotations\":{},\"name\":\"base-validation\"},\"webhooks\":[{\"admissionReviewVersions\":[\"v1\"],\"clientConfig\":{\"url\":\"https://foo.com\"},\"name\":\"new.foo.com\",\"sideEffects\":\"None\"}]}\n"}},"webhooks":[{"admissionReviewVersions":["v1"],"clientConfig":{"url":"https://foo.com"},"name":"new.foo.com","sideEffects":"None"},{"$patch":"delete","name":"foo.bar.com"}]}`), diff --git a/test/integration/apiserver/podlogs/podlogs_test.go b/test/integration/apiserver/podlogs/podlogs_test.go index a9c6189ab38..1d2ad10061e 100644 --- a/test/integration/apiserver/podlogs/podlogs_test.go +++ b/test/integration/apiserver/podlogs/podlogs_test.go @@ -51,6 +51,7 @@ import ( "k8s.io/client-go/util/keyutil" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestInsecurePodLogs(t *testing.T) { @@ -77,7 +78,11 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq -----END CERTIFICATE----- `)) - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 // I have no idea what this cert is, but it doesn't matter, we just want something that always fails validation @@ -92,7 +97,7 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq })) defer fakeKubeletServer.Close() - pod := prepareFakeNodeAndPod(context.TODO(), t, clientSet, fakeKubeletServer) + pod := prepareFakeNodeAndPod(ctx, t, clientSet, fakeKubeletServer) insecureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{InsecureSkipTLSVerifyBackend: true}).Do(context.TODO()) if err := insecureResult.Error(); err != nil { @@ -104,7 +109,7 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq t.Fatal(insecureStatusCode) } - secureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(context.TODO()) + secureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(ctx) if err := secureResult.Error(); err == nil || !strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { t.Fatal(err) } @@ -250,7 +255,7 @@ func TestPodLogsKubeletClientCertReload(t *testing.T) { Bytes: fakeKubeletServer.Certificate().Raw, })) - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024 opts.KubeletConfig.TLSClientConfig.CAFile = kubeletCA diff --git a/test/integration/apiserver/print_test.go b/test/integration/apiserver/print_test.go index 744e75ffae9..ed4a1405402 100644 --- a/test/integration/apiserver/print_test.go +++ b/test/integration/apiserver/print_test.go @@ -120,7 +120,7 @@ var missingHanlders = sets.NewString( ) func 
TestServerSidePrint(t *testing.T) { - clientSet, kubeConfig, tearDownFn := setupWithResources(t, + _, clientSet, kubeConfig, tearDownFn := setupWithResources(t, // additional groupversions needed for the test to run []schema.GroupVersion{ {Group: "discovery.k8s.io", Version: "v1"}, diff --git a/test/integration/apiserver/watchcache_test.go b/test/integration/apiserver/watchcache_test.go index ae0b1aa6ec5..4b7c1c56845 100644 --- a/test/integration/apiserver/watchcache_test.go +++ b/test/integration/apiserver/watchcache_test.go @@ -31,11 +31,12 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/controlplane/reconcilers" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // setup create kube-apiserver backed up by two separate etcds, // with one of them containing events and the other all other objects. -func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) { +func multiEtcdSetup(ctx context.Context, t *testing.T) (clientset.Interface, framework.TearDownFunc) { etcdArgs := []string{"--experimental-watch-progress-notify-interval", "1s"} etcd0URL, stopEtcd0, err := framework.RunCustomEtcd("etcd_watchcache0", etcdArgs, nil) if err != nil { @@ -53,7 +54,7 @@ func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) etcdOptions.EtcdServersOverrides = []string{fmt.Sprintf("/events#%s", etcd1URL)} etcdOptions.EnableWatchCache = true - clientSet, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure we're using the same etcd across apiserver restarts. opts.Etcd = etcdOptions @@ -74,7 +75,6 @@ func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) // Everything but default service creation is checked in StartTestServer above by // waiting for post start hooks, so we just wait for default service to exist. // TODO(wojtek-t): Figure out less fragile way. - ctx := context.Background() if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { _, err := clientSet.CoreV1().Services("default").Get(ctx, "kubernetes", metav1.GetOptions{}) return err == nil, nil @@ -85,10 +85,12 @@ func multiEtcdSetup(t *testing.T) (clientset.Interface, framework.TearDownFunc) } func TestWatchCacheUpdatedByEtcd(t *testing.T) { - c, closeFn := multiEtcdSetup(t) - defer closeFn() + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() - ctx := context.Background() + c, closeFn := multiEtcdSetup(ctx, t) + defer closeFn() makeConfigMap := func(name string) *v1.ConfigMap { return &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}} @@ -167,7 +169,11 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) { } func BenchmarkListFromWatchCache(b *testing.B) { - c, _, tearDownFn := framework.StartTestServer(b, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(b) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, _, tearDownFn := framework.StartTestServer(ctx, b, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Switch off endpoints reconciler to avoid unnecessary operations. 
config.ExtraConfig.EndpointReconcilerType = reconcilers.NoneEndpointReconcilerType @@ -185,7 +191,6 @@ func BenchmarkListFromWatchCache(b *testing.B) { go func() { defer wg.Done() - ctx := context.Background() ns := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("namespace-%d", index)}, } @@ -218,7 +223,6 @@ func BenchmarkListFromWatchCache(b *testing.B) { b.ResetTimer() - ctx := context.Background() opts := metav1.ListOptions{ ResourceVersion: "0", } diff --git a/test/integration/auth/accessreview_test.go b/test/integration/auth/accessreview_test.go index 1272bcedf01..647a8c07793 100644 --- a/test/integration/auth/accessreview_test.go +++ b/test/integration/auth/accessreview_test.go @@ -32,6 +32,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // Inject into control plane an authorizer that uses user info. @@ -57,7 +58,11 @@ func alwaysAlice(req *http.Request) (*authenticator.Response, bool, error) { } func TestSubjectAccessReview(t *testing.T) { - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Unset BearerToken to disable BearerToken authenticator. config.GenericConfig.LoopbackClientConfig.BearerToken = "" @@ -127,7 +132,7 @@ func TestSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), test.sar, metav1.CreateOptions{}) + response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(ctx, test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: @@ -149,7 +154,12 @@ func TestSubjectAccessReview(t *testing.T) { } func TestSelfSubjectAccessReview(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var mutex sync.Mutex + username := "alice" authenticatorFunc := func(req *http.Request) (*authenticator.Response, bool, error) { mutex.Lock() @@ -164,7 +174,7 @@ func TestSelfSubjectAccessReview(t *testing.T) { }, true, nil } - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Unset BearerToken to disable BearerToken authenticator. 
config.GenericConfig.LoopbackClientConfig.BearerToken = "" @@ -225,7 +235,7 @@ func TestSelfSubjectAccessReview(t *testing.T) { username = test.username mutex.Unlock() - response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), test.sar, metav1.CreateOptions{}) + response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: @@ -247,7 +257,11 @@ func TestSelfSubjectAccessReview(t *testing.T) { } func TestLocalSubjectAccessReview(t *testing.T) { - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { // Unset BearerToken to disable BearerToken authenticator. config.GenericConfig.LoopbackClientConfig.BearerToken = "" @@ -345,7 +359,7 @@ func TestLocalSubjectAccessReview(t *testing.T) { } for _, test := range tests { - response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(context.TODO(), test.sar, metav1.CreateOptions{}) + response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(ctx, test.sar, metav1.CreateOptions{}) switch { case err == nil && len(test.expectedError) == 0: diff --git a/test/integration/auth/auth_test.go b/test/integration/auth/auth_test.go index becba2d72e3..674f98dffd7 100644 --- a/test/integration/auth/auth_test.go +++ b/test/integration/auth/auth_test.go @@ -72,6 +72,7 @@ import ( "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/authutil" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -451,7 +452,11 @@ func getTestRequests(namespace string) []testRequest { // // TODO(etune): write a fuzz test of the REST API. func TestAuthModeAlwaysAllow(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -555,7 +560,11 @@ func getPreviousResourceVersionKey(url, id string) string { } func TestAuthModeAlwaysDeny(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -599,7 +608,11 @@ func TestAuthModeAlwaysDeny(t *testing.T) { // TestAliceNotForbiddenOrUnauthorized tests a user who is known to // the authentication system and authorized to do any actions. 
func TestAliceNotForbiddenOrUnauthorized(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -675,7 +688,11 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) { // the authentication system but not authorized to do any actions // should receive "Forbidden". func TestBobIsForbidden(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -724,7 +741,11 @@ func TestBobIsForbidden(t *testing.T) { // An authorization module is installed in this scenario for integration // test purposes, but requests aren't expected to reach it. func TestUnknownUserIsUnauthorized(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -796,7 +817,11 @@ func (impersonateAuthorizer) Authorize(ctx context.Context, a authorizer.Attribu } func TestImpersonateIsForbidden(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1098,9 +1123,13 @@ func (a *trackingAuthorizer) Authorize(ctx context.Context, attributes authorize // TestAuthorizationAttributeDetermination tests that authorization attributes are built correctly func TestAuthorizationAttributeDetermination(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + trackingAuthorizer := &trackingAuthorizer{} - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1170,7 +1199,11 @@ func TestAuthorizationAttributeDetermination(t *testing.T) { // TestNamespaceAuthorization tests that authorization can be controlled // by namespace. func TestNamespaceAuthorization(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1271,7 +1304,11 @@ func TestNamespaceAuthorization(t *testing.T) { // TestKindAuthorization tests that authorization can be controlled // by namespace. func TestKindAuthorization(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1354,7 +1391,11 @@ func TestKindAuthorization(t *testing.T) { // TestReadOnlyAuthorization tests that authorization can be controlled // by namespace. func TestReadOnlyAuthorization(t *testing.T) { - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -1418,6 +1459,10 @@ func TestWebhookTokenAuthenticatorCustomDial(t *testing.T) { } func testWebhookTokenAuthenticator(customDialer bool, t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + authServer := newTestWebhookTokenAuthServer() defer authServer.Close() var authenticator authenticator.Request @@ -1433,7 +1478,7 @@ func testWebhookTokenAuthenticator(customDialer bool, t *testing.T) { t.Fatalf("error starting webhook token authenticator server: %v", err) } - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} diff --git a/test/integration/auth/bootstraptoken_test.go b/test/integration/auth/bootstraptoken_test.go index b513d10dfe7..3ada62dc14d 100644 --- a/test/integration/auth/bootstraptoken_test.go +++ b/test/integration/auth/bootstraptoken_test.go @@ -18,6 +18,7 @@ package auth import ( "bytes" + "context" "fmt" "io" "net/http" @@ -36,6 +37,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) type bootstrapSecrets []*corev1.Secret @@ -119,9 +121,13 @@ func TestBootstrapTokenAuth(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + authenticator := group.NewAuthenticatedGroupAdder(bearertoken.New(bootstrap.NewTokenAuthenticator(bootstrapSecrets{test.secret}))) - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Authorization.Modes = []string{"AlwaysAllow"} }, diff --git a/test/integration/auth/dynamic_client_test.go b/test/integration/auth/dynamic_client_test.go index 1192e46e656..be862869d0b 100644 --- a/test/integration/auth/dynamic_client_test.go +++ b/test/integration/auth/dynamic_client_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/cmd/kube-apiserver/app/options" kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestDynamicClientBuilder(t *testing.T) { @@ -51,7 +52,11 @@ func TestDynamicClientBuilder(t *testing.T) { t.Fatalf("parse duration failed: %v", err) } - baseClient, baseConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + baseClient, baseConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceAccountSigningKeyFile = tmpfile.Name() opts.ServiceAccountTokenMaxExpiration = maxExpirationDuration @@ -95,7 +100,7 @@ func TestDynamicClientBuilder(t *testing.T) { // We want to trigger token rotation here by deleting service account // the dynamic client was using. 
- if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), saName, metav1.DeleteOptions{}); err != nil { + if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(ctx, saName, metav1.DeleteOptions{}); err != nil { t.Fatalf("delete service account %s failed: %v", saName, err) } time.Sleep(time.Second * 10) diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index fab5964c50d..bbe0d0d5a6d 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -59,6 +59,7 @@ import ( rolebindingstore "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage" "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func clientForToken(user string, rt http.RoundTripper) *http.Client { @@ -537,13 +538,18 @@ func TestRBAC(t *testing.T) { "user-with-no-permissions": {Name: "user-with-no-permissions"}, }))) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var tearDownAuthorizerFn func() defer func() { if tearDownAuthorizerFn != nil { tearDownAuthorizerFn() } }() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. // Also disable namespace lifecycle to workaroung the test limitation that first creates @@ -669,19 +675,22 @@ func TestRBAC(t *testing.T) { } func TestBootstrapping(t *testing.T) { - clientset, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Authorization.Modes = []string{"RBAC"} }, }) defer tearDownFn() - watcher, err := clientset.RbacV1().ClusterRoles().Watch(context.TODO(), metav1.ListOptions{ResourceVersion: "0"}) + watcher, err := clientset.RbacV1().ClusterRoles().Watch(ctx, metav1.ListOptions{ResourceVersion: "0"}) if err != nil { t.Fatalf("unexpected error: %v", err) } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() + _, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) { if event.Type != watch.Added { return false, nil @@ -692,7 +701,7 @@ func TestBootstrapping(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - clusterRoles, err := clientset.RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + clusterRoles, err := clientset.RbacV1().ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -708,7 +717,7 @@ func TestBootstrapping(t *testing.T) { t.Errorf("missing cluster-admin: %v", clusterRoles) - healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthook/rbac/bootstrap-roles").DoRaw(context.TODO()) + healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthook/rbac/bootstrap-roles").DoRaw(ctx) if err != nil { t.Error(err) } @@ -727,7 +736,11 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { etcdConfig := framework.SharedEtcd() - client, _, tearDownFn := framework.StartTestServer(t, 
framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure we're using the same etcd across apiserver restarts. opts.Etcd.StorageConfig = *etcdConfig @@ -738,7 +751,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { // Modify the default RBAC discovery ClusterRoleBidnings to look more like the defaults that // existed prior to v1.14, but with user modifications. t.Logf("Modifying default `system:discovery` ClusterRoleBinding") - discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:discovery", metav1.GetOptions{}) + discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:discovery", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err) } @@ -751,21 +764,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { APIGroup: "rbac.authorization.k8s.io", }, } - if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), discRoleBinding, metav1.UpdateOptions{}); err != nil { + if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(ctx, discRoleBinding, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update `system:discovery` ClusterRoleBinding: %v", err) } t.Logf("Modifying default `system:basic-user` ClusterRoleBinding") - basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:basic-user", metav1.GetOptions{}) + basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:basic-user", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err) } basicUserRoleBinding.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "false" basicUserRoleBinding.Annotations["rbac-discovery-upgrade-test"] = "pass" - if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), basicUserRoleBinding, metav1.UpdateOptions{}); err != nil { + if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(ctx, basicUserRoleBinding, metav1.UpdateOptions{}); err != nil { t.Fatalf("Failed to update `system:basic-user` ClusterRoleBinding: %v", err) } t.Logf("Deleting default `system:public-info-viewer` ClusterRoleBinding") - if err = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "system:public-info-viewer", metav1.DeleteOptions{}); err != nil { + if err = client.RbacV1().ClusterRoleBindings().Delete(ctx, "system:public-info-viewer", metav1.DeleteOptions{}); err != nil { t.Fatalf("Failed to delete `system:public-info-viewer` ClusterRoleBinding: %v", err) } @@ -775,7 +788,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { // Check that upgraded API servers inherit `system:public-info-viewer` settings from // `system:discovery`, and respect auto-reconciliation annotations. - client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Ensure we're using the same etcd across apiserver restarts. 
opts.Etcd.StorageConfig = *etcdConfig @@ -783,21 +796,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) { }, }) - newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:discovery", metav1.GetOptions{}) + newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:discovery", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err) } if !reflect.DeepEqual(newDiscRoleBinding, discRoleBinding) { t.Errorf("`system:discovery` should have been unmodified. Wanted: %v, got %v", discRoleBinding, newDiscRoleBinding) } - newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:basic-user", metav1.GetOptions{}) + newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:basic-user", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err) } if !reflect.DeepEqual(newBasicUserRoleBinding, basicUserRoleBinding) { t.Errorf("`system:basic-user` should have been unmodified. Wanted: %v, got %v", basicUserRoleBinding, newBasicUserRoleBinding) } - publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), "system:public-info-viewer", metav1.GetOptions{}) + publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:public-info-viewer", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get `system:public-info-viewer` ClusterRoleBinding: %v", err) } diff --git a/test/integration/auth/selfsubjectreview_test.go b/test/integration/auth/selfsubjectreview_test.go index 5d81ba80347..ce4c16448c8 100644 --- a/test/integration/auth/selfsubjectreview_test.go +++ b/test/integration/auth/selfsubjectreview_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestGetsSelfAttributes(t *testing.T) { @@ -88,6 +89,10 @@ func TestGetsSelfAttributes(t *testing.T) { }, } + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APISelfSubjectReview, true)() var respMu sync.RWMutex @@ -95,7 +100,7 @@ func TestGetsSelfAttributes(t *testing.T) { Name: "stub", } - kubeClient, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1alpha1=true") opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1beta1=true") @@ -122,7 +127,7 @@ func TestGetsSelfAttributes(t *testing.T) { res, err := kubeClient.AuthenticationV1alpha1(). SelfSubjectReviews(). - Create(context.TODO(), &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -149,7 +154,7 @@ func TestGetsSelfAttributes(t *testing.T) { res2, err := kubeClient.AuthenticationV1beta1(). SelfSubjectReviews(). 
- Create(context.TODO(), &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -202,7 +207,6 @@ func TestGetsSelfAttributes(t *testing.T) { } }) } - } func TestGetsSelfAttributesError(t *testing.T) { @@ -211,7 +215,11 @@ func TestGetsSelfAttributesError(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.APISelfSubjectReview, true)() - kubeClient, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + kubeClient, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1alpha1=true") opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1beta1=true") @@ -243,7 +251,7 @@ func TestGetsSelfAttributesError(t *testing.T) { _, err := kubeClient.AuthenticationV1alpha1(). SelfSubjectReviews(). - Create(context.TODO(), &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{}) if err == nil { t.Fatalf("expected error: %v, got nil", err) } @@ -259,7 +267,7 @@ func TestGetsSelfAttributesError(t *testing.T) { _, err := kubeClient.AuthenticationV1beta1(). SelfSubjectReviews(). - Create(context.TODO(), &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) + Create(ctx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{}) if err == nil { t.Fatalf("expected error: %v, got nil", err) } diff --git a/test/integration/auth/svcaccttoken_test.go b/test/integration/auth/svcaccttoken_test.go index f31a1091000..912aba31979 100644 --- a/test/integration/auth/svcaccttoken_test.go +++ b/test/integration/auth/svcaccttoken_test.go @@ -49,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/serviceaccount" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -74,9 +75,13 @@ func TestServiceAccountTokenCreate(t *testing.T) { var tokenGenerator serviceaccount.TokenGenerator + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Start the server var serverAddress string - kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -163,7 +168,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { } warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -172,18 +177,18 @@ func TestServiceAccountTokenCreate(t *testing.T) { treqWithBadName := treq.DeepCopy() treqWithBadName.Name = "invalid-name" - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treqWithBadName, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "must match the service account name") { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treqWithBadName, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "must match the service account name") { t.Fatalf("expected err creating token with mismatched name but got: %#v", resp) } treqWithBadNamespace := treq.DeepCopy() treqWithBadNamespace.Namespace = "invalid-namespace" - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treqWithBadNamespace, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "does not match the namespace") { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treqWithBadNamespace, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "does not match the namespace") { t.Fatalf("expected err creating token with mismatched namespace but got: %#v, %v", resp, err) } warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -227,7 +232,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { } warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -235,7 +240,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to nonexistant pod but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -245,21 +250,21 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = pod.UID warningHandler.clear() - if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { 
t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to pod with wrong uid but got: %#v", resp) } warningHandler.assertEqual(t, nil) // no uid treq.Spec.BoundObjectRef.UID = noUID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -300,7 +305,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { } warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -308,7 +313,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to nonexistant secret but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -318,21 +323,21 @@ func TestServiceAccountTokenCreate(t *testing.T) { // right uid treq.Spec.BoundObjectRef.UID = secret.UID warningHandler.clear() - if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) // wrong uid treq.Spec.BoundObjectRef.UID = wrongUID warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { t.Fatalf("expected err creating token bound to secret with wrong uid but got: %#v", resp) } warningHandler.assertEqual(t, nil) // no uid treq.Spec.BoundObjectRef.UID = noUID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -368,7 +373,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err == nil { + if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil { 
t.Fatalf("expected err but got: %#v", resp) } warningHandler.assertEqual(t, nil) @@ -385,7 +390,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -436,7 +441,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = pod.UID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -492,7 +497,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = pod.UID warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -534,7 +539,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -552,7 +557,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { defer del() warningHandler.clear() - treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}) + treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}) if err != nil { t.Fatalf("err: %v", err) } @@ -582,7 +587,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = originalPod.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) @@ -625,7 +630,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = originalSecret.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) @@ -670,7 +675,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { treq.Spec.BoundObjectRef.UID = originalSecret.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, nil) @@ -716,7 +721,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { 
treq.Spec.BoundObjectRef.UID = originalSecret.UID warningHandler.clear() - if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(context.TODO(), sa.Name, treq, metav1.CreateOptions{}); err != nil { + if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil { t.Fatalf("err: %v", err) } warningHandler.assertEqual(t, []string{fmt.Sprintf("requested expiration of %d seconds shortened to %d seconds", tooLongExpirationTime, maxExpirationSeconds)}) @@ -745,9 +750,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Log("get token") warningHandler.clear() - tokenRequest, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken( - context.TODO(), - sa.Name, + tokenRequest, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ Audiences: []string{"api"}, diff --git a/test/integration/clustercidr/ipam_test.go b/test/integration/clustercidr/ipam_test.go index 1f6a1798856..d083cb8db1f 100644 --- a/test/integration/clustercidr/ipam_test.go +++ b/test/integration/clustercidr/ipam_test.go @@ -43,11 +43,14 @@ import ( ) func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() // set the feature gate to enable MultiCIDRRangeAllocator defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -61,9 +64,6 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -130,10 +130,14 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
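// For illustration only (not part of the patch): in each IPAM test the cancellable context is now created before the apiserver starts, replacing the later context.WithCancel(context.Background()). A sketch of the resulting shape, with setup details elided:
//
//	_, ctx := ktesting.NewTestContext(t)
//	ctx, cancel := context.WithCancel(ctx)
//	defer cancel()
//
//	_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ /* ... */ })
//	defer tearDownFn()
//
//	go ipamController.Run(ctx)
//	sharedInformer.Start(ctx.Done())
//
// so the apiserver, the controller, and the shared informers all stop on the same test-scoped cancellation.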
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -147,9 +151,6 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -207,10 +208,14 @@ func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -224,9 +229,6 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -303,10 +305,14 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -320,9 +326,6 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) @@ -388,10 +391,14 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) { } func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTieBreak(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // set the feature gate to enable MultiCIDRRangeAllocator defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} @@ -405,9 +412,6 @@ func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTieBreak(t *testing.T) { ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go ipamController.Run(ctx) sharedInformer.Start(ctx.Done()) diff --git a/test/integration/controlplane/synthetic_controlplane_test.go b/test/integration/controlplane/synthetic_controlplane_test.go index 443c27170ed..6efc4988834 100644 --- a/test/integration/controlplane/synthetic_controlplane_test.go +++ b/test/integration/controlplane/synthetic_controlplane_test.go @@ -46,6 +46,7 @@ import ( kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -189,7 +190,11 @@ func TestStatus(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(options *options.ServerRunOptions) { if tc.modifyOptions != nil { tc.modifyOptions(options) diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index fd175b4491a..bfd2e43f010 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -39,7 +39,6 @@ import ( "k8s.io/client-go/tools/events" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/retry" - "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -50,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/profile" labelsutil "k8s.io/kubernetes/pkg/util/labels" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) var zero = int64(0) @@ -59,6 +59,9 @@ func setup(t *testing.T) (context.Context, 
kubeapiservertesting.TearDownFunc, *d } func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + modifyServerRunOptions := serverSetup.ModifyServerRunOptions serverSetup.ModifyServerRunOptions = func(opts *options.ServerRunOptions) { if modifyServerRunOptions != nil { @@ -73,11 +76,9 @@ func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) ( ) } - clientSet, config, closeFn := framework.StartTestServer(t, serverSetup) + clientSet, config, closeFn := framework.StartTestServer(ctx, t, serverSetup) resyncPeriod := 12 * time.Hour - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "daemonset-informers")), resyncPeriod) dc, err := daemon.NewDaemonSetsController( ctx, @@ -487,13 +488,13 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } defer cleanupDaemonSets(t, clientset, ds) - _, err = nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -520,7 +521,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -622,7 +623,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) { }, } - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -657,7 +658,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -668,7 +669,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { node.Status.Conditions = []v1.NodeCondition{ {Type: v1.NodeReady, Status: v1.ConditionFalse}, } - _, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -699,7 +700,7 @@ func TestInsufficientCapacityNode(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m") ds.Spec.UpdateStrategy = *strategy - ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -708,7 +709,7 @@ func TestInsufficientCapacityNode(t *testing.T) { node := 
newNode("node-with-limited-memory", nil) node.Status.Allocatable = allocatableResources("100M", "200m") - _, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, node, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -727,7 +728,7 @@ func TestInsufficientCapacityNode(t *testing.T) { node1 := newNode("node-with-enough-memory", nil) node1.Status.Allocatable = allocatableResources("200M", "2000m") - _, err = nodeClient.Create(context.TODO(), node1, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, node1, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -754,7 +755,7 @@ func TestLaunchWithHashCollision(t *testing.T) { go dc.Run(ctx, 2) // Create single node - _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) + _, err := nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -768,7 +769,7 @@ func TestLaunchWithHashCollision(t *testing.T) { MaxUnavailable: &oneIntString, }, } - ds, err := dsClient.Create(context.TODO(), orgDs, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, orgDs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -779,7 +780,7 @@ func TestLaunchWithHashCollision(t *testing.T) { t.Fatalf("Failed to create DaemonSet: %v", err) } - ds, err = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = dsClient.Get(ctx, ds.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get DaemonSet: %v", err) } @@ -790,7 +791,7 @@ func TestLaunchWithHashCollision(t *testing.T) { // Look up the ControllerRevision for the DaemonSet _, name := hashAndNameForDaemonSet(ds) - revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) + revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil || revision == nil { t.Fatalf("Failed to look up ControllerRevision: %v", err) } @@ -812,7 +813,7 @@ func TestLaunchWithHashCollision(t *testing.T) { Data: revision.Data, Revision: revision.Revision + 1, } - _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), newRevision, metav1.CreateOptions{}) + _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, newRevision, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create ControllerRevision: %v", err) } @@ -859,7 +860,7 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { go dc.Run(ctx, 2) // Create single node - _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{}) + _, err := nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -873,7 +874,7 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { MaxUnavailable: &oneIntString, }, } - ds, err := dsClient.Create(context.TODO(), orgDs, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, orgDs, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -884,14 +885,14 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { t.Fatalf("Failed to create DaemonSet: %v", err) } - ds, err = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}) + ds, err = dsClient.Get(ctx, ds.Name, 
metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to get DaemonSet: %v", err) } // Look up the ControllerRevision for the DaemonSet _, name := hashAndNameForDaemonSet(ds) - revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) + revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil || revision == nil { t.Fatalf("Failed to look up ControllerRevision: %v", err) } @@ -913,14 +914,14 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { Data: revision.Data, Revision: revision.Revision + 1, } - _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), newRevision, metav1.CreateOptions{}) + _, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, newRevision, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create ControllerRevision: %v", err) } t.Logf("revision: %v", newName) // ensure the daemonset to be synced - _, err = nodeClient.Create(context.TODO(), newNode("second-node", nil), metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, newNode("second-node", nil), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create node: %v", err) } @@ -946,7 +947,7 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) { } err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { - revs, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).List(context.TODO(), metav1.ListOptions{}) + revs, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("failed to list controllerrevision: %v", err) } @@ -990,7 +991,7 @@ func TestTaintedNode(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + ds, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } @@ -999,13 +1000,13 @@ func TestTaintedNode(t *testing.T) { nodeWithTaint := newNode("node-with-taint", nil) nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}} - _, err = nodeClient.Create(context.TODO(), nodeWithTaint, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, nodeWithTaint, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create nodeWithTaint: %v", err) } nodeWithoutTaint := newNode("node-without-taint", nil) - _, err = nodeClient.Create(context.TODO(), nodeWithoutTaint, metav1.CreateOptions{}) + _, err = nodeClient.Create(ctx, nodeWithoutTaint, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create nodeWithoutTaint: %v", err) } @@ -1014,13 +1015,13 @@ func TestTaintedNode(t *testing.T) { validateDaemonSetStatus(dsClient, ds.Name, 1, t) // remove taint from nodeWithTaint - nodeWithTaint, err = nodeClient.Get(context.TODO(), "node-with-taint", metav1.GetOptions{}) + nodeWithTaint, err = nodeClient.Get(ctx, "node-with-taint", metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to retrieve nodeWithTaint: %v", err) } nodeWithTaintCopy := nodeWithTaint.DeepCopy() nodeWithTaintCopy.Spec.Taints = []v1.Taint{} - _, err = nodeClient.Update(context.TODO(), nodeWithTaintCopy, metav1.UpdateOptions{}) + _, err = nodeClient.Update(ctx, nodeWithTaintCopy, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Failed to update nodeWithTaint: %v", err) } @@ -1119,7 +1120,7 @@ func 
TestUpdateStatusDespitePodCreationFailure(t *testing.T) { ds := newDaemonSet("foo", ns.Name) ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{}) + _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create DaemonSet: %v", err) } diff --git a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go index 39ee9adbf95..d9f24cd5509 100644 --- a/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go +++ b/test/integration/defaulttolerationseconds/defaulttolerationseconds_test.go @@ -20,16 +20,21 @@ import ( "context" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestAdmission(t *testing.T) { - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(cfg *controlplane.Config) { cfg.GenericConfig.EnableProfiling = true cfg.GenericConfig.AdmissionControl = defaulttolerationseconds.NewDefaultTolerationSeconds() @@ -55,7 +60,7 @@ func TestAdmission(t *testing.T) { }, } - updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(ctx, &pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating pod: %v", err) } diff --git a/test/integration/dualstack/dualstack_endpoints_test.go b/test/integration/dualstack/dualstack_endpoints_test.go index 35beff8ee3e..24c2c02f4ed 100644 --- a/test/integration/dualstack/dualstack_endpoints_test.go +++ b/test/integration/dualstack/dualstack_endpoints_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/endpointslice" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestDualStackEndpoints(t *testing.T) { @@ -43,7 +44,11 @@ func TestDualStackEndpoints(t *testing.T) { return map[string]string{"foo": "bar"} } - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. @@ -54,7 +59,7 @@ func TestDualStackEndpoints(t *testing.T) { // Wait until the default "kubernetes" service is created. 
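// For illustration only (not part of the patch): the wait.Poll readiness loops keep their shape; only the Get inside the condition now uses the test context, e.g.
//
//	wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
//		_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
//		// ...
//	})
//
// so a cancelled test context surfaces as an error from the Get and ends the poll at the next tick rather than hanging until the timeout.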
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -83,7 +88,7 @@ func TestDualStackEndpoints(t *testing.T) { }, }, } - if _, err := client.CoreV1().Nodes().Create(context.TODO(), testNode, metav1.CreateOptions{}); err != nil { + if _, err := client.CoreV1().Nodes().Create(ctx, testNode, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) } @@ -103,8 +108,6 @@ func TestDualStackEndpoints(t *testing.T) { client, 1*time.Second) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // Start informer and controllers informers.Start(ctx.Done()) // use only one worker to serialize the updates @@ -166,7 +169,7 @@ func TestDualStackEndpoints(t *testing.T) { }, } - createdPod, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) + createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create pod %s: %v", pod.Name, err) } @@ -177,7 +180,7 @@ func TestDualStackEndpoints(t *testing.T) { Phase: v1.PodRunning, PodIPs: []v1.PodIP{{IP: podIPbyFamily[v1.IPv4Protocol]}, {IP: podIPbyFamily[v1.IPv6Protocol]}}, } - _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), createdPod, metav1.UpdateOptions{}) + _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) } @@ -205,7 +208,7 @@ func TestDualStackEndpoints(t *testing.T) { } // create a service - _, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err = client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating service: %v", err) } @@ -214,7 +217,7 @@ func TestDualStackEndpoints(t *testing.T) { // legacy endpoints are not dual stack // and use the address of the first IP family if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { - e, err := client.CoreV1().Endpoints(ns.Name).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + e, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Logf("Error fetching endpoints: %v", err) return false, nil @@ -236,7 +239,7 @@ func TestDualStackEndpoints(t *testing.T) { // wait until the endpoint slices are created err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { lSelector := discovery.LabelServiceName + "=" + svc.Name - esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: lSelector}) + esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector}) if err != nil { t.Logf("Error listing EndpointSlices: %v", err) return false, nil diff --git a/test/integration/dualstack/dualstack_test.go b/test/integration/dualstack/dualstack_test.go index b052d1c45d9..e2752b1a72e 100644 --- a/test/integration/dualstack/dualstack_test.go +++ b/test/integration/dualstack/dualstack_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/cmd/kube-apiserver/app/options" 
"k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" netutils "k8s.io/utils/net" ) @@ -45,7 +46,11 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { // Create an IPv4 single stack control-plane serviceCIDR := "10.0.0.0/16" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -54,7 +59,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -247,7 +252,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -256,7 +261,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -272,7 +277,11 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { // Create an IPv6 only dual stack control-plane serviceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10") @@ -282,7 +291,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -462,7 +471,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -471,7 +480,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -488,7 +497,11 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -497,7 +510,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -724,7 +737,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -733,7 +746,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -751,7 +764,11 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { serviceCIDR := "2001:db8:1::/112" secondaryServiceCIDR := "10.0.0.0/16" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10") @@ -761,7 +778,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -772,7 +789,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { // verify client is working if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { - _, err := client.CoreV1().Endpoints("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { t.Logf("error fetching endpoints: %v", err) return false, nil @@ -943,7 +960,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if (err != nil) != tc.expectError { t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err) } @@ -952,7 +969,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) { return } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -970,7 +987,11 @@ func TestUpgradeDowngrade(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -979,7 +1000,7 @@ func TestUpgradeDowngrade(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1006,12 +1027,12 @@ func TestUpgradeDowngrade(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error while creating service:%v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1023,7 +1044,7 @@ func TestUpgradeDowngrade(t *testing.T) { // upgrade it requireDualStack := v1.IPFamilyPolicyRequireDualStack svc.Spec.IPFamilyPolicy = &requireDualStack - upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error upgrading service to dual stack. %v", err) } @@ -1036,7 +1057,7 @@ func TestUpgradeDowngrade(t *testing.T) { upgraded.Spec.IPFamilyPolicy = &singleStack upgraded.Spec.ClusterIPs = upgraded.Spec.ClusterIPs[0:1] upgraded.Spec.IPFamilies = upgraded.Spec.IPFamilies[0:1] - downgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), upgraded, metav1.UpdateOptions{}) + downgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, upgraded, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error downgrading service to single stack. %v", err) } @@ -1046,7 +1067,7 @@ func TestUpgradeDowngrade(t *testing.T) { // run test again this time without removing secondary IPFamily or ClusterIP downgraded.Spec.IPFamilyPolicy = &requireDualStack - upgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), downgraded, metav1.UpdateOptions{}) + upgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, downgraded, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error upgrading service to dual stack. %v", err) } @@ -1057,7 +1078,7 @@ func TestUpgradeDowngrade(t *testing.T) { upgradedAgain.Spec.IPFamilyPolicy = &singleStack // api-server automatically removes the secondary ClusterIP and IPFamily // when a servie is downgraded. - downgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), upgradedAgain, metav1.UpdateOptions{}) + downgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, upgradedAgain, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error downgrading service to single stack. 
%v", err) } @@ -1073,7 +1094,11 @@ func TestConvertToFromExternalName(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -1082,7 +1107,7 @@ func TestConvertToFromExternalName(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1108,12 +1133,12 @@ func TestConvertToFromExternalName(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error while creating service:%v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1127,7 +1152,7 @@ func TestConvertToFromExternalName(t *testing.T) { svc.Spec.ClusterIP = "" // not clearing ClusterIPs svc.Spec.ExternalName = "something.somewhere" - externalNameSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + externalNameSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error converting service to external name. %v", err) } @@ -1139,7 +1164,7 @@ func TestConvertToFromExternalName(t *testing.T) { // convert to a ClusterIP service externalNameSvc.Spec.Type = v1.ServiceTypeClusterIP externalNameSvc.Spec.ExternalName = "" - clusterIPSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), externalNameSvc, metav1.UpdateOptions{}) + clusterIPSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, externalNameSvc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error converting service to ClusterIP. 
%v", err) } @@ -1154,7 +1179,11 @@ func TestPreferDualStack(t *testing.T) { serviceCIDR := "10.0.0.0/16" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) }, @@ -1163,7 +1192,7 @@ func TestPreferDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1193,12 +1222,12 @@ func TestPreferDualStack(t *testing.T) { } // create a service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error while creating service:%v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1209,7 +1238,7 @@ func TestPreferDualStack(t *testing.T) { // update it svc.Spec.Selector = map[string]string{"foo": "bar"} - upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Fatalf("unexpected error upgrading service to dual stack. %v", err) } @@ -1227,7 +1256,11 @@ func TestServiceUpdate(t *testing.T) { // Create an IPv4 single stack control-plane serviceCIDR := "10.0.0.0/16" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -1236,7 +1269,7 @@ func TestServiceUpdate(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1262,26 +1295,26 @@ func TestServiceUpdate(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) // if no error was expected validate the service otherwise return if err != nil { t.Errorf("unexpected error creating service:%v", err) return } - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Unexpected error to get the service %s %v", svc.Name, err) } // update using put svc.Labels = map[string]string{"x": "y"} - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Update(context.TODO(), svc, metav1.UpdateOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{}) if err != nil { t.Errorf("Unexpected error updating the service %s %v", svc.Name, err) } - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1296,12 +1329,12 @@ func TestServiceUpdate(t *testing.T) { t.Fatalf("failed to json.Marshal labels: %v", err) } - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(context.TODO(), svc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { t.Fatalf("unexpected error patching service using strategic merge patch. %v", err) } - current, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + current, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1323,17 +1356,16 @@ func TestServiceUpdate(t *testing.T) { t.Fatalf("unexpected error creating json patch. %v", err) } - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(context.TODO(), svc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { t.Fatalf("unexpected error patching service using merge patch. 
%v", err) } // validate the service was created correctly if it was not expected to fail - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } - } // validateServiceAndClusterIPFamily checks that the service has the expected IPFamilies @@ -1386,10 +1418,14 @@ func validateServiceAndClusterIPFamily(svc *v1.Service, expectedIPFamilies []v1. func TestUpgradeServicePreferToDualStack(t *testing.T) { sharedEtcd := framework.SharedEtcd() + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Create an IPv4 only dual stack control-plane serviceCIDR := "192.168.0.0/24" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = serviceCIDR @@ -1398,7 +1434,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1429,12 +1465,12 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1447,7 +1483,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) @@ -1457,7 +1493,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1466,7 +1502,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { t.Fatalf("creating kubernetes service timed out") } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1479,11 +1515,15 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) { func TestDowngradeServicePreferToDualStack(t *testing.T) { sharedEtcd := framework.SharedEtcd() + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Create a dual stack control-plane serviceCIDR := "192.168.0.0/24" secondaryServiceCIDR := "2001:db8:1::/112" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) @@ -1492,7 +1532,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1521,12 +1561,12 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { }, } // create the service - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("Unexpected error: %v", err) } // validate the service was created correctly if it was not expected to fail - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1537,7 +1577,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { tearDownFn() // reset secondary - client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{ + client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.Etcd.StorageConfig = *sharedEtcd opts.ServiceClusterIPRanges = serviceCIDR @@ -1547,7 +1587,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -1556,7 +1596,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) { t.Fatalf("creating kubernetes service timed out") } // validate the service is still there. - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err) } @@ -1576,7 +1616,11 @@ type specMergePatch struct { // tests success when converting ClusterIP:Headless service to ExternalName func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) { - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{}) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{}) defer tearDownFn() ns := framework.CreateNamespaceOrDie(client, "test-service-allocate-node-ports", t) @@ -1594,7 +1638,7 @@ func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) { } var err error - service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{}) + service, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating test service: %v", err) } @@ -1610,7 +1654,7 @@ func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) { t.Fatalf("failed to json.Marshal ports: %v", err) } - _, err = client.CoreV1().Services(ns.Name).Patch(context.TODO(), service.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.CoreV1().Services(ns.Name).Patch(ctx, service.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { t.Fatalf("unexpected error patching service using strategic merge patch. 
%v", err) } diff --git a/test/integration/examples/webhook_test.go b/test/integration/examples/webhook_test.go index 0c7f04be765..6dcf2c309f7 100644 --- a/test/integration/examples/webhook_test.go +++ b/test/integration/examples/webhook_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/pkg/controlplane/reconcilers" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) func TestWebhookLoopback(t *testing.T) { @@ -41,7 +42,11 @@ func TestWebhookLoopback(t *testing.T) { called := int32(0) - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { }, ModifyServerConfig: func(config *controlplane.Config) { @@ -67,7 +72,7 @@ func TestWebhookLoopback(t *testing.T) { fail := admissionregistrationv1.Fail noSideEffects := admissionregistrationv1.SideEffectClassNone - _, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(ctx, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{Name: "webhooktest.example.com"}, Webhooks: []admissionregistrationv1.MutatingWebhook{{ Name: "webhooktest.example.com", @@ -88,7 +93,7 @@ func TestWebhookLoopback(t *testing.T) { } err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) { - _, err = client.CoreV1().ConfigMaps("default").Create(context.TODO(), &v1.ConfigMap{ + _, err = client.CoreV1().ConfigMaps("default").Create(ctx, &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "webhook-test"}, Data: map[string]string{"invalid key": "value"}, }, metav1.CreateOptions{}) diff --git a/test/integration/framework/test_server.go b/test/integration/framework/test_server.go index 1fea953a983..ce1599ea544 100644 --- a/test/integration/framework/test_server.go +++ b/test/integration/framework/test_server.go @@ -59,13 +59,14 @@ type TestServerSetup struct { type TearDownFunc func() // StartTestServer runs a kube-apiserver, optionally calling out to the setup.ModifyServerRunOptions and setup.ModifyServerConfig functions -func StartTestServer(t testing.TB, setup TestServerSetup) (client.Interface, *rest.Config, TearDownFunc) { +func StartTestServer(ctx context.Context, t testing.TB, setup TestServerSetup) (client.Interface, *rest.Config, TearDownFunc) { + ctx, cancel := context.WithCancel(ctx) + certDir, err := os.MkdirTemp("", "test-integration-"+strings.ReplaceAll(t.Name(), "/", "_")) if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) } - ctx, cancel := context.WithCancel(context.Background()) var errCh chan error tearDownFn := func() { // Calling cancel function is stopping apiserver and cleaning up diff --git a/test/integration/ipamperf/ipam_test.go b/test/integration/ipamperf/ipam_test.go index 2380ddcbb55..33986da2e1a 100644 --- a/test/integration/ipamperf/ipam_test.go +++ b/test/integration/ipamperf/ipam_test.go @@ -123,7 +123,11 @@ func TestPerformance(t *testing.T) { t.Skip("Skipping because we want to run short tests") } - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := 
context.WithCancel(ctx) + defer cancel() + + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} diff --git a/test/integration/network/services_test.go b/test/integration/network/services_test.go index 5a93a5d856a..294d640bbaa 100644 --- a/test/integration/network/services_test.go +++ b/test/integration/network/services_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/controlplane" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) // TestServicesFinalizersRepairLoop tests that Services participate in the object @@ -41,7 +42,11 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { clusterIP := "10.0.0.20" interval := 5 * time.Second - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -53,7 +58,7 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { // verify client is working if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { - _, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { t.Logf("error fetching endpoints: %v", err) return false, nil @@ -82,20 +87,20 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { } // Create service - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), &svcNodePort, metav1.CreateOptions{}); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil { t.Errorf("unexpected error creating service: %v", err) } t.Logf("Created service: %s", svcNodePort.Name) // Check the service has been created correctly - svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{}) + svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, metav1.GetOptions{}) if err != nil || svc.Spec.ClusterIP != clusterIP { t.Errorf("created service is not correct: %v", err) } t.Logf("Service created successfully: %v", svc) // Delete service - if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(context.TODO(), svcNodePort.Name, metav1.DeleteOptions{}); err != nil { + if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(ctx, svcNodePort.Name, metav1.DeleteOptions{}); err != nil { t.Errorf("unexpected error deleting service: %v", err) } t.Logf("Deleted service: %s", svcNodePort.Name) @@ -104,26 +109,26 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { time.Sleep(interval + 1) // Check that the service was not deleted and the IP is already allocated - svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{}) + svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, 
metav1.GetOptions{}) if err != nil || svc.Spec.ClusterIP != clusterIP { t.Errorf("created service is not correct: %v", err) } t.Logf("Service after Delete: %v", svc) // Remove the finalizer - if _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(context.TODO(), svcNodePort.Name, types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`), metav1.PatchOptions{}); err != nil { + if _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svcNodePort.Name, types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`), metav1.PatchOptions{}); err != nil { t.Errorf("unexpected error removing finalizer: %v", err) } t.Logf("Removed service finalizer: %s", svcNodePort.Name) // Check that the service was deleted - _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{}) + _, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, metav1.GetOptions{}) if err == nil { t.Errorf("service was not delete: %v", err) } // Try to create service again - if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), &svcNodePort, metav1.CreateOptions{}); err != nil { + if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil { t.Errorf("unexpected error creating service: %v", err) } t.Logf("Created service: %s", svcNodePort.Name) @@ -133,7 +138,11 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { func TestServiceCIDR28bits(t *testing.T) { serviceCIDR := "10.0.0.0/28" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -142,7 +151,7 @@ func TestServiceCIDR28bits(t *testing.T) { // Wait until the default "kubernetes" service is created. 
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil { return false, err } @@ -169,7 +178,7 @@ func TestServiceCIDR28bits(t *testing.T) { }, } - _, err := client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{}) + _, err := client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{}) if err != nil { t.Fatalf("Error creating test service: %v", err) } diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index d7c02dab7ad..c5e7c55b06a 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -60,8 +60,12 @@ const ( // quota_test.go:100: Took 4.196205966s to scale up without quota // quota_test.go:115: Took 12.021640372s to scale up with quota func TestQuota(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Set up a API server - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -76,10 +80,6 @@ func TestQuota(t *testing.T) { ns2 := framework.CreateNamespaceOrDie(clientset, "non-quotaed", t) defer framework.DeleteNamespaceOrDie(clientset, ns2, t) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) rm := replicationcontroller.NewReplicationManager( informers.Core().V1().Pods(), @@ -290,8 +290,12 @@ plugins: t.Fatal(err) } + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Set up an API server - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -306,10 +310,6 @@ plugins: ns := framework.CreateNamespaceOrDie(clientset, "quota", t) defer framework.DeleteNamespaceOrDie(clientset, ns, t) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) rm := replicationcontroller.NewReplicationManager( informers.Core().V1().Pods(), @@ -417,8 +417,12 @@ plugins: t.Fatal(err) } + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // Set up an API server - _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} @@ -433,10 +437,6 @@ plugins: ns := framework.CreateNamespaceOrDie(clientset, "quota", t) defer framework.DeleteNamespaceOrDie(clientset, ns, t) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) rm := replicationcontroller.NewReplicationManager( informers.Core().V1().Pods(), diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go index 83ab1b1ccd5..3261452f6a3 100644 --- a/test/integration/scheduler/queue_test.go +++ b/test/integration/scheduler/queue_test.go @@ -291,8 +291,12 @@ func TestCustomResourceEnqueue(t *testing.T) { testfwk.SharedEtcd(), ) testCtx := &testutils.TestContext{} - testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background()) - testCtx.CloseFn = func() { server.TearDownFn() } + ctx, cancel := context.WithCancel(context.Background()) + testCtx.Ctx = ctx + testCtx.CloseFn = func() { + cancel() + server.TearDownFn() + } apiExtensionClient := apiextensionsclient.NewForConfigOrDie(server.ClientConfig) dynamicClient := dynamic.NewForConfigOrDie(server.ClientConfig) diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go index dc191e2e157..dbfed434ee2 100644 --- a/test/integration/scheduler_perf/util.go +++ b/test/integration/scheduler_perf/util.go @@ -80,7 +80,7 @@ func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSc // Run API server with minimimal logging by default. Can be raised with -v. framework.MinVerbosity = 0 - _, kubeConfig, tearDownFn := framework.StartTestServer(b, framework.TestServerSetup{ + _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, b, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority"} diff --git a/test/integration/serviceaccount/service_account_test.go b/test/integration/serviceaccount/service_account_test.go index e9e18e22761..e19e4ab178d 100644 --- a/test/integration/serviceaccount/service_account_test.go +++ b/test/integration/serviceaccount/service_account_test.go @@ -49,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/serviceaccount" serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" "k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -59,7 +60,11 @@ const ( ) func TestServiceAccountAutoCreate(t *testing.T) { - c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t) defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: %v", err) @@ -68,7 +73,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { ns := "test-service-account-creation" // Create namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("could not create namespace: %v", err) } @@ -80,7 +85,7 @@ func TestServiceAccountAutoCreate(t *testing.T) { } // Delete service account - err = c.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), defaultUser.Name, metav1.DeleteOptions{}) + err = c.CoreV1().ServiceAccounts(ns).Delete(ctx, defaultUser.Name, metav1.DeleteOptions{}) if err != nil { t.Fatalf("Could not delete default serviceaccount: %v", err) } @@ -96,7 +101,11 @@ func TestServiceAccountAutoCreate(t *testing.T) { } func TestServiceAccountTokenAutoMount(t *testing.T) { - c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t) defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: %v", err) @@ -105,7 +114,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { ns := "auto-mount-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } @@ -123,7 +132,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { }, } - createdPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), &protoPod, metav1.CreateOptions{}) + createdPod, err := c.CoreV1().Pods(ns).Create(ctx, &protoPod, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } @@ -138,7 +147,11 @@ func TestServiceAccountTokenAutoMount(t *testing.T) { } func TestServiceAccountTokenAuthentication(t *testing.T) { - c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t) 
defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: %v", err) @@ -148,19 +161,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { otherns := "other-ns" // Create "my" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "other" namespace - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } // Create "ro" user in myns - roSA, err := c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) + roSA, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -175,13 +188,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { roClient := clientset.NewForConfigOrDie(&roClientConfig) doServiceAccountAPIRequests(t, roClient, myns, true, true, false) doServiceAccountAPIRequests(t, roClient, otherns, true, false, false) - err = c.CoreV1().Secrets(myns).Delete(context.TODO(), roTokenName, metav1.DeleteOptions{}) + err = c.CoreV1().Secrets(myns).Delete(ctx, roTokenName, metav1.DeleteOptions{}) if err != nil { t.Fatalf("could not delete token: %v", err) } // wait for delete to be observed and reacted to via watch err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { - _, err := roClient.CoreV1().Secrets(myns).List(context.TODO(), metav1.ListOptions{}) + _, err := roClient.CoreV1().Secrets(myns).List(ctx, metav1.ListOptions{}) if err == nil { t.Logf("token is still valid, waiting") return false, nil @@ -198,7 +211,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { doServiceAccountAPIRequests(t, roClient, myns, false, false, false) // Create "rw" user in myns - rwSA, err := c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}, metav1.CreateOptions{}) + rwSA, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -215,8 +228,12 @@ func TestServiceAccountTokenAuthentication(t *testing.T) { } func TestLegacyServiceAccountTokenTracking(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, kubefeatures.LegacyServiceAccountTokenTracking, true)() - c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t) + c, config, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(ctx, t) defer stopFunc() if err != nil { t.Fatalf("failed to setup ServiceAccounts server: 
%v", err) @@ -224,11 +241,11 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { // create service account myns := "auth-ns" - _, err = c.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) + _, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{}) if err != nil && !apierrors.IsAlreadyExists(err) { t.Fatalf("could not create namespace: %v", err) } - mysa, err := c.CoreV1().ServiceAccounts(myns).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) + mysa, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{}) if err != nil { t.Fatalf("Service Account not created: %v", err) } @@ -287,7 +304,7 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { } wg.Wait() dateAfter := time.Now().UTC().Format(dateFormat) - liveSecret, err := c.CoreV1().Secrets(myns).Get(context.TODO(), test.secretName, metav1.GetOptions{}) + liveSecret, err := c.CoreV1().Secrets(myns).Get(ctx, test.secretName, metav1.GetOptions{}) if err != nil { t.Fatalf("Could not get secret: %v", err) } @@ -314,7 +331,7 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { // configmap should exist with 'since' timestamp. if err = wait.PollImmediate(time.Millisecond*10, wait.ForeverTestTimeout, func() (bool, error) { dateBefore := time.Now().UTC().Format("2006-01-02") - configMap, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), legacytokentracking.ConfigMapName, metav1.GetOptions{}) + configMap, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, legacytokentracking.ConfigMapName, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get %q configmap, err %w", legacytokentracking.ConfigMapDataKey, err) } @@ -334,11 +351,13 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) { // startServiceAccountTestServerAndWaitForCaches returns a started server // It is the responsibility of the caller to ensure the returned stopFunc is called -func startServiceAccountTestServerAndWaitForCaches(t *testing.T) (clientset.Interface, *restclient.Config, func(), error) { +func startServiceAccountTestServerAndWaitForCaches(ctx context.Context, t *testing.T) (clientset.Interface, *restclient.Config, func(), error) { var serviceAccountKey interface{} + ctx, cancel := context.WithCancel(ctx) + // Set up a API server - rootClientset, clientConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + rootClientset, clientConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { var err error serviceAccountKey, err = keyutil.PrivateKeyFromFile(opts.ServiceAccountSigningKeyFile) @@ -379,7 +398,6 @@ func startServiceAccountTestServerAndWaitForCaches(t *testing.T) (clientset.Inte }, }) - ctx, cancel := context.WithCancel(context.Background()) stop := func() { cancel() tearDownFn() diff --git a/test/integration/servicecidr/allocator_test.go b/test/integration/servicecidr/allocator_test.go index 45a2523d3b6..5c398ccb2f2 100644 --- a/test/integration/servicecidr/allocator_test.go +++ b/test/integration/servicecidr/allocator_test.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/features" 
"k8s.io/kubernetes/test/integration/framework" + "k8s.io/kubernetes/test/utils/ktesting" netutils "k8s.io/utils/net" ) @@ -43,7 +44,11 @@ func TestServiceAlloc(t *testing.T) { // Create an IPv4 single stack control-plane serviceCIDR := "192.168.0.0/29" - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR }, @@ -115,7 +120,11 @@ func TestServiceAllocIPAddress(t *testing.T) { serviceCIDR := "2001:db8::/64" defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)() - client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerRunOptions: func(opts *options.ServerRunOptions) { opts.ServiceClusterIPRanges = serviceCIDR opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10") @@ -140,7 +149,7 @@ func TestServiceAllocIPAddress(t *testing.T) { // Wait until the default "kubernetes" service is created. if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { - _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return false, err } @@ -151,11 +160,11 @@ func TestServiceAllocIPAddress(t *testing.T) { // create 5 random services and check that the Services have an IP associated for i := 0; i < 5; i++ { - svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc(i), metav1.CreateOptions{}) + svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc(i), metav1.CreateOptions{}) if err != nil { t.Error(err) } - _, err = client.NetworkingV1alpha1().IPAddresses().Get(context.TODO(), svc.Spec.ClusterIP, metav1.GetOptions{}) + _, err = client.NetworkingV1alpha1().IPAddresses().Get(ctx, svc.Spec.ClusterIP, metav1.GetOptions{}) if err != nil { t.Error(err) } diff --git a/test/integration/statefulset/statefulset_test.go b/test/integration/statefulset/statefulset_test.go index 5d0409327ea..812f86560d3 100644 --- a/test/integration/statefulset/statefulset_test.go +++ b/test/integration/statefulset/statefulset_test.go @@ -351,8 +351,12 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 // add for issue: https://github.com/kubernetes/kubernetes/issues/108837 func TestStatefulSetStatusWithPodFail(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + limitedPodNumber := 2 - c, config, closeFn := framework.StartTestServer(t, framework.TestServerSetup{ + c, config, closeFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ ModifyServerConfig: func(config *controlplane.Config) { config.GenericConfig.AdmissionControl = &fakePodFailAdmission{ limitedPodNumber: limitedPodNumber, @@ -363,9 +367,6 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { resyncPeriod := 12 * time.Hour informers := 
informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod) - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() ssc := statefulset.NewStatefulSetController( ctx, informers.Core().V1().Pods(), @@ -382,7 +383,7 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { go ssc.Run(ctx, 5) sts := newSTS("sts", ns.Name, 4) - _, err := c.AppsV1().StatefulSets(sts.Namespace).Create(context.TODO(), sts, metav1.CreateOptions{}) + _, err := c.AppsV1().StatefulSets(sts.Namespace).Create(ctx, sts, metav1.CreateOptions{}) if err != nil { t.Fatalf("Could not create statefuleSet %s: %v", sts.Name, err) } @@ -390,7 +391,7 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { wantReplicas := limitedPodNumber var gotReplicas int32 if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(context.TODO(), sts.Name, metav1.GetOptions{}) + newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(ctx, sts.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/integration/util/util.go b/test/integration/util/util.go index 9df62213ab3..2dececa8791 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -58,6 +58,7 @@ import ( taintutils "k8s.io/kubernetes/pkg/util/taints" "k8s.io/kubernetes/test/integration/framework" imageutils "k8s.io/kubernetes/test/utils/image" + "k8s.io/kubernetes/test/utils/ktesting" "k8s.io/utils/pointer" ) @@ -151,8 +152,6 @@ type TestContext struct { Scheduler *scheduler.Scheduler // This is the top context when initializing the test environment. Ctx context.Context - // CancelFn will cancel the context above. - CancelFn context.CancelFunc // CloseFn will stop the apiserver and clean up the resources // after itself, including shutting down its storage layer. CloseFn framework.TearDownFunc @@ -210,14 +209,10 @@ func SyncSchedulerInformerFactory(testCtx *TestContext) { // CleanupTest cleans related resources which were created during integration test func CleanupTest(t *testing.T, testCtx *TestContext) { - // Cancel the context of the whole test environment, it will terminate the scheduler as well. - testCtx.CancelFn() - // Cleanup nodes and namespaces. testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(testCtx.Ctx, *metav1.NewDeleteOptions(0), metav1.ListOptions{}) framework.DeleteNamespaceOrDie(testCtx.ClientSet, testCtx.NS, t) - - // Terminate the apiserver. + // Terminate the scheduler and apiserver. testCtx.CloseFn() } @@ -356,13 +351,11 @@ func UpdateNodeStatus(cs clientset.Interface, node *v1.Node) error { // It registers cleanup functions to t.Cleanup(), they will be called when the test completes, // no need to do this again. 
func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
- ctx, cancel := context.WithCancel(context.Background())
- testCtx := TestContext{
- Ctx: ctx,
- CancelFn: cancel,
- }
+ _, ctx := ktesting.NewTestContext(t)
+ ctx, cancel := context.WithCancel(ctx)
+ testCtx := TestContext{Ctx: ctx}
- testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(t, framework.TestServerSetup{
+ testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(options *options.ServerRunOptions) {
options.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority", "StorageObjectInUseProtection"}
},
@@ -373,6 +366,12 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf
},
})
+ oldCloseFn := testCtx.CloseFn
+ testCtx.CloseFn = func() {
+ cancel()
+ oldCloseFn()
+ }
+
if nsPrefix != "default" {
testCtx.NS = framework.CreateNamespaceOrDie(testCtx.ClientSet, nsPrefix+string(uuid.NewUUID()), t)
} else {
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index 0807aaef9ca..d91554b7f7e 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -998,7 +998,6 @@ func TestRescheduleProvisioning(t *testing.T) {
ns := testCtx.NS.Name
defer func() {
- testCtx.CancelFn()
deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
}()
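Taken together, these hunks converge on one calling convention for the integration tests: derive a cancellable context from ktesting, hand it to framework.StartTestServer, and reuse that same context for every client call so cancelling it tears down the whole test environment. The standalone sketch below is illustrative only and not part of the patch; it assumes the updated StartTestServer signature shown above, and the package name, test name, options import path, and CIDR value are assumptions rather than anything taken from the patched files.

package sketch // hypothetical package name, not part of the patch

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// assumed import path for ServerRunOptions; verify against the files above
	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestContextWiringSketch(t *testing.T) {
	// Derive a per-test context from ktesting and make it cancellable.
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Hand the context to the test apiserver; cancelling ctx (or calling
	// tearDownFn) stops the server and cleans up after the test.
	client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
			opts.ServiceClusterIPRanges = "10.0.0.0/16" // hypothetical CIDR
		},
	})
	defer tearDownFn()

	// Client calls reuse ctx instead of context.TODO(), so they share the
	// test's cancellation.
	if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}); err != nil && !apierrors.IsNotFound(err) {
		t.Fatalf("unexpected error getting the kubernetes service: %v", err)
	}
}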