Merge pull request #122293 from mengjiao-liu/controller-reconsider-log-verbosity
kube-controller-manager: readjust log verbosity
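The hunks below repeat a small set of patterns: controller constructors take a context instead of a bare logger, event broadcasters are created with record.WithContext(ctx), and the event-to-log mirroring moves from StartStructuredLogging(0) to StartStructuredLogging(3). A minimal sketch of that shape, under the assumption of a recent client-go; the controller name and type here are hypothetical stand-ins, not code from this commit:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

// exampleController is a hypothetical stand-in for the controllers touched in this commit.
type exampleController struct {
	kubeClient  kubernetes.Interface
	broadcaster record.EventBroadcaster
	recorder    record.EventRecorder
}

func newExampleController(ctx context.Context, kubeClient kubernetes.Interface) *exampleController {
	logger := klog.FromContext(ctx)
	// The broadcaster is created with the caller's context, so its logging and
	// shutdown follow that context instead of a detached background one.
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	c := &exampleController{
		kubeClient:  kubeClient,
		broadcaster: broadcaster,
		recorder:    broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"}),
	}
	logger.V(3).Info("Constructed controller")
	return c
}

func (c *exampleController) Run(ctx context.Context) {
	// Events are mirrored into the structured log at verbosity 3 rather than 0,
	// so they only appear when the component runs with -v=3 or higher.
	c.broadcaster.StartStructuredLogging(3)
	c.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")})
	defer c.broadcaster.Shutdown()
	<-ctx.Done()
}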
@@ -26,7 +26,6 @@ import (

	"k8s.io/client-go/util/flowcontrol"
	"k8s.io/controller-manager/controller"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/cmd/kube-controller-manager/names"
	"k8s.io/kubernetes/pkg/controller/daemon"
	"k8s.io/kubernetes/pkg/controller/deployment"
@@ -87,7 +86,7 @@ func newReplicaSetControllerDescriptor() *ControllerDescriptor {

func startReplicaSetController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
	go replicaset.NewReplicaSetController(
		klog.FromContext(ctx),
		ctx,
		controllerContext.InformerFactory.Apps().V1().ReplicaSets(),
		controllerContext.InformerFactory.Core().V1().Pods(),
		controllerContext.ClientBuilder.ClientOrDie("replicaset-controller"),

@@ -79,6 +79,7 @@ func startHPAControllerWithMetricsClient(ctx context.Context, controllerContext
	}

	go podautoscaler.NewHorizontalController(
		ctx,
		hpaClient.CoreV1(),
		scaleClient,
		hpaClient.AutoscalingV2(),

@@ -375,7 +375,7 @@ func startPersistentVolumeAttachDetachController(ctx context.Context, controller
	ctx = klog.NewContext(ctx, logger)
	attachDetachController, attachDetachControllerErr :=
		attachdetach.NewAttachDetachController(
			logger,
			ctx,
			controllerContext.ClientBuilder.ClientOrDie("attachdetach-controller"),
			controllerContext.InformerFactory.Core().V1().Pods(),
			controllerContext.InformerFactory.Core().V1().Nodes(),
@@ -416,6 +416,7 @@ func startPersistentVolumeExpanderController(ctx context.Context, controllerCont
	csiTranslator := csitrans.New()

	expandController, expandControllerErr := expand.NewExpandController(
		ctx,
		controllerContext.ClientBuilder.ClientOrDie("expand-controller"),
		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
		controllerContext.Cloud,
@@ -441,6 +442,7 @@ func newEphemeralVolumeControllerDescriptor() *ControllerDescriptor {

func startEphemeralVolumeController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
	ephemeralController, err := ephemeral.NewController(
		ctx,
		controllerContext.ClientBuilder.ClientOrDie("ephemeral-volume-controller"),
		controllerContext.InformerFactory.Core().V1().Pods(),
		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims())
@@ -489,6 +491,7 @@ func newEndpointsControllerDescriptor() *ControllerDescriptor {

func startEndpointsController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
	go endpointcontroller.NewEndpointController(
		ctx,
		controllerContext.InformerFactory.Core().V1().Pods(),
		controllerContext.InformerFactory.Core().V1().Services(),
		controllerContext.InformerFactory.Core().V1().Endpoints(),
@@ -508,7 +511,7 @@ func newReplicationControllerDescriptor() *ControllerDescriptor {

func startReplicationController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
	go replicationcontroller.NewReplicationManager(
		klog.FromContext(ctx),
		ctx,
		controllerContext.InformerFactory.Core().V1().Pods(),
		controllerContext.InformerFactory.Core().V1().ReplicationControllers(),
		controllerContext.ClientBuilder.ClientOrDie("replication-controller"),
@@ -686,6 +689,7 @@ func startGarbageCollectorController(ctx context.Context, controllerContext Cont
		ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{}
	}
	garbageCollector, err := garbagecollector.NewGarbageCollector(
		ctx,
		gcClientset,
		metadataClient,
		controllerContext.RESTMapper,

@@ -39,6 +39,7 @@ func newServiceCIDRsControllerDescriptor() *ControllerDescriptor {
}
func startServiceCIDRsController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
	go servicecidrs.NewController(
		ctx,
		controllerContext.InformerFactory.Networking().V1alpha1().ServiceCIDRs(),
		controllerContext.InformerFactory.Networking().V1alpha1().IPAddresses(),
		controllerContext.ClientBuilder.ClientOrDie("service-cidrs-controller"),

@@ -82,7 +82,7 @@ type ControllerV2 struct {
// NewControllerV2 creates and initializes a new Controller.
func NewControllerV2(ctx context.Context, jobInformer batchv1informers.JobInformer, cronJobsInformer batchv1informers.CronJobInformer, kubeClient clientset.Interface) (*ControllerV2, error) {
	logger := klog.FromContext(ctx)
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))

	jm := &ControllerV2{
		queue:       workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cronjob"),
@@ -129,7 +129,7 @@ func (jm *ControllerV2) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	// Start event processing pipeline.
	jm.broadcaster.StartStructuredLogging(0)
	jm.broadcaster.StartStructuredLogging(3)
	jm.broadcaster.StartRecordingToSink(&covev1client.EventSinkImpl{Interface: jm.kubeClient.CoreV1().Events("")})
	defer jm.broadcaster.Shutdown()

@@ -138,7 +138,7 @@ func NewDaemonSetsController(
	kubeClient clientset.Interface,
	failedPodsBackoff *flowcontrol.Backoff,
) (*DaemonSetsController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
	logger := klog.FromContext(ctx)
	dsc := &DaemonSetsController{
		kubeClient:       kubeClient,
@@ -279,7 +279,7 @@ func (dsc *DaemonSetsController) deleteDaemonset(logger klog.Logger, obj interfa
func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	dsc.eventBroadcaster.StartStructuredLogging(0)
	dsc.eventBroadcaster.StartStructuredLogging(3)
	dsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dsc.kubeClient.CoreV1().Events("")})
	defer dsc.eventBroadcaster.Shutdown()

@@ -99,7 +99,7 @@ type DeploymentController struct {

// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(ctx context.Context, dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
	logger := klog.FromContext(ctx)
	dc := &DeploymentController{
		client:           client,
@@ -158,7 +158,7 @@ func (dc *DeploymentController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	// Start events processing pipeline.
	dc.eventBroadcaster.StartStructuredLogging(0)
	dc.eventBroadcaster.StartStructuredLogging(3)
	dc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.client.CoreV1().Events("")})
	defer dc.eventBroadcaster.Shutdown()

@@ -181,7 +181,7 @@ func NewDisruptionControllerInternal(ctx context.Context,
		queue:                     workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "disruption"), workqueue.DefaultControllerRateLimiter()),
		recheckQueue:              workqueue.NewDelayingQueueWithCustomClock(clock, "disruption_recheck"),
		stalePodDisruptionQueue:   workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "stale_pod_disruption"), workqueue.DefaultControllerRateLimiter()),
		broadcaster:               record.NewBroadcaster(),
		broadcaster:               record.NewBroadcaster(record.WithContext(ctx)),
		stalePodDisruptionTimeout: stalePodDisruptionTimeout,
	}
	dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})

@@ -70,9 +70,9 @@ const (
)

// NewEndpointController returns a new *Controller.
func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
func NewEndpointController(ctx context.Context, podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
	endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface, endpointUpdatesBatchPeriod time.Duration) *Controller {
	broadcaster := record.NewBroadcaster()
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-controller"})

	e := &Controller{
@@ -164,7 +164,7 @@ func (e *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	// Start events processing pipeline.
	e.eventBroadcaster.StartStructuredLogging(0)
	e.eventBroadcaster.StartStructuredLogging(3)
	e.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: e.client.CoreV1().Events("")})
	defer e.eventBroadcaster.Shutdown()

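The endpoints controller test hunks that follow all repeat one pattern: the test builds its context with ktesting.Init(t), passes it to the newController/newFakeController helpers, and checks the error returned by syncService instead of discarding it. A minimal sketch of that shape, reusing the test file's own helpers; the URL and service key below are illustrative placeholders, not values from the PR:

func TestExampleSyncPattern(t *testing.T) {
	// ktesting.Init returns a context bound to the test's lifetime that also
	// carries a per-test logger.
	tCtx := ktesting.Init(t)

	// newController and syncService are the helpers defined in this test file.
	endpoints := newController(tCtx, "http://127.0.0.1:0", 0*time.Second)
	if err := endpoints.syncService(tCtx, "default/foo"); err != nil {
		t.Errorf("Unexpected error syncing service %v", err)
	}
}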
@@ -43,6 +43,7 @@ import (
	endptspkg "k8s.io/kubernetes/pkg/api/v1/endpoints"
	api "k8s.io/kubernetes/pkg/apis/core"
	controllerpkg "k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/test/utils/ktesting"
	utilnet "k8s.io/utils/net"
	"k8s.io/utils/pointer"
)
@@ -209,10 +210,10 @@ type endpointController struct {
	endpointsStore cache.Store
}

func newController(url string, batchPeriod time.Duration) *endpointController {
func newController(ctx context.Context, url string, batchPeriod time.Duration) *endpointController {
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: url, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
	informerFactory := informers.NewSharedInformerFactory(client, controllerpkg.NoResyncPeriodFunc())
	endpoints := NewEndpointController(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Services(),
	endpoints := NewEndpointController(ctx, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Services(),
		informerFactory.Core().V1().Endpoints(), client, batchPeriod)
	endpoints.podsSynced = alwaysReady
	endpoints.servicesSynced = alwaysReady
@@ -225,11 +226,12 @@ func newController(url string, batchPeriod time.Duration) *endpointController {
	}
}

func newFakeController(batchPeriod time.Duration) (*fake.Clientset, *endpointController) {
func newFakeController(ctx context.Context, batchPeriod time.Duration) (*fake.Clientset, *endpointController) {
	client := fake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(client, controllerpkg.NoResyncPeriodFunc())

	eController := NewEndpointController(
		ctx,
		informerFactory.Core().V1().Pods(),
		informerFactory.Core().V1().Services(),
		informerFactory.Core().V1().Endpoints(),
| @@ -252,7 +254,9 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -268,7 +272,10 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { | ||||
| 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, | ||||
| 		Spec:       v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	endpointsHandler.ValidateRequestCount(t, 0) | ||||
| } | ||||
|  | ||||
| @@ -276,7 +283,9 @@ func TestSyncEndpointsExistingNilSubsets(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -292,7 +301,10 @@ func TestSyncEndpointsExistingNilSubsets(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	endpointsHandler.ValidateRequestCount(t, 0) | ||||
| } | ||||
|  | ||||
| @@ -300,7 +312,9 @@ func TestSyncEndpointsExistingEmptySubsets(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -316,7 +330,10 @@ func TestSyncEndpointsExistingEmptySubsets(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	endpointsHandler.ValidateRequestCount(t, 0) | ||||
| } | ||||
|  | ||||
| @@ -326,7 +343,8 @@ func TestSyncEndpointsWithPodResourceVersionUpdateOnly(t *testing.T) { | ||||
| 	defer testServer.Close() | ||||
| 	pod0 := testPod(ns, 0, 1, true, ipv4only) | ||||
| 	pod1 := testPod(ns, 1, 1, false, ipv4only) | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -362,7 +380,11 @@ func TestSyncEndpointsWithPodResourceVersionUpdateOnly(t *testing.T) { | ||||
| 	pod1.ResourceVersion = "4" | ||||
| 	endpoints.podStore.Add(pod0) | ||||
| 	endpoints.podStore.Add(pod1) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 0) | ||||
| } | ||||
|  | ||||
| @@ -370,7 +392,8 @@ func TestSyncEndpointsNewNoSubsets(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.serviceStore.Add(&v1.Service{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, | ||||
| 		Spec: v1.ServiceSpec{ | ||||
| @@ -378,7 +401,10 @@ func TestSyncEndpointsNewNoSubsets(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| } | ||||
|  | ||||
| @@ -386,7 +412,9 @@ func TestCheckLeftoverEndpoints(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, _ := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -412,7 +440,8 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -432,7 +461,10 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| @@ -456,7 +488,8 @@ func TestSyncEndpointsHeadlessServiceLabel(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -475,7 +508,11 @@ func TestSyncEndpointsHeadlessServiceLabel(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 0) | ||||
| } | ||||
|  | ||||
| @@ -534,12 +571,13 @@ func TestSyncServiceExternalNameType(t *testing.T) { | ||||
| 			testServer, endpointsHandler := makeTestServer(t, namespace) | ||||
|  | ||||
| 			defer testServer.Close() | ||||
| 			endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 			err := endpoints.serviceStore.Add(tc.service) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Error adding service to service store: %v", err) | ||||
| 			} | ||||
| 			err = endpoints.syncService(context.TODO(), namespace+"/"+serviceName) | ||||
| 			err = endpoints.syncService(tCtx, namespace+"/"+serviceName) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Error syncing service: %v", err) | ||||
| 			} | ||||
| @@ -552,7 +590,8 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -572,7 +611,10 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "UDP"}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| @@ -596,7 +638,8 @@ func TestSyncEndpointsProtocolSCTP(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -616,7 +659,10 @@ func TestSyncEndpointsProtocolSCTP(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "SCTP"}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| @@ -640,7 +686,8 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -657,7 +704,10 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| @@ -680,7 +730,9 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -697,7 +749,10 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| @@ -720,7 +775,9 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -737,7 +794,10 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| @@ -761,7 +821,8 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) { | ||||
| 	ns := "bar" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -781,7 +842,10 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| @@ -804,7 +868,8 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			ResourceVersion: "1", | ||||
| @@ -824,7 +889,10 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	endpointsHandler.ValidateRequestCount(t, 0) | ||||
| } | ||||
|  | ||||
| @@ -832,7 +900,8 @@ func TestSyncEndpointsItems(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	addPods(endpoints.podStore, ns, 3, 2, 0, ipv4only) | ||||
| 	addPods(endpoints.podStore, "blah", 5, 2, 0, ipv4only) // make sure these aren't found! | ||||
|  | ||||
| @@ -846,7 +915,10 @@ func TestSyncEndpointsItems(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), "other/foo") | ||||
| 	err := endpoints.syncService(tCtx, "other/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	expectedSubsets := []v1.EndpointSubset{{ | ||||
| 		Addresses: []v1.EndpointAddress{ | ||||
| @@ -877,7 +949,8 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	addPods(endpoints.podStore, ns, 3, 2, 0, ipv4only) | ||||
| 	serviceLabels := map[string]string{"foo": "bar"} | ||||
| 	endpoints.serviceStore.Add(&v1.Service{ | ||||
| @@ -894,7 +967,10 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	expectedSubsets := []v1.EndpointSubset{{ | ||||
| 		Addresses: []v1.EndpointAddress{ | ||||
| @@ -925,7 +1001,8 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { | ||||
| 	ns := "bar" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -953,7 +1030,10 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	serviceLabels[v1.IsHeadlessService] = "" | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| @@ -989,7 +1069,8 @@ func TestWaitsForAllInformersToBeSynced2(t *testing.T) { | ||||
| 			ns := "other" | ||||
| 			testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 			defer testServer.Close() | ||||
| 			endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 			addPods(endpoints.podStore, ns, 1, 1, 0, ipv4only) | ||||
|  | ||||
| 			service := &v1.Service{ | ||||
| @@ -1005,9 +1086,7 @@ func TestWaitsForAllInformersToBeSynced2(t *testing.T) { | ||||
| 			endpoints.servicesSynced = test.servicesSynced | ||||
| 			endpoints.endpointsSynced = test.endpointsSynced | ||||
| 			endpoints.workerLoopPeriod = 10 * time.Millisecond | ||||
| 			stopCh := make(chan struct{}) | ||||
| 			defer close(stopCh) | ||||
| 			go endpoints.Run(context.TODO(), 1) | ||||
| 			go endpoints.Run(tCtx, 1) | ||||
|  | ||||
| 			// cache.WaitForNamedCacheSync has a 100ms poll period, and the endpoints worker has a 10ms period. | ||||
| 			// To ensure we get all updates, including unexpected ones, we need to wait at least as long as | ||||
| @@ -1030,7 +1109,8 @@ func TestSyncEndpointsHeadlessService(t *testing.T) { | ||||
| 	ns := "headless" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1053,7 +1133,10 @@ func TestSyncEndpointsHeadlessService(t *testing.T) { | ||||
| 	} | ||||
| 	originalService := service.DeepCopy() | ||||
| 	endpoints.serviceStore.Add(service) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1080,7 +1163,9 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseFail | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1100,7 +1185,10 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseFail | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1119,7 +1207,9 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseSucc | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1139,7 +1229,10 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseSucc | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1158,7 +1251,9 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyOnFailureAndPhase | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1178,7 +1273,11 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyOnFailureAndPhase | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1197,7 +1296,8 @@ func TestSyncEndpointsHeadlessWithoutPort(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.serviceStore.Add(&v1.Service{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, | ||||
| 		Spec: v1.ServiceSpec{ | ||||
| @@ -1207,7 +1307,11 @@ func TestSyncEndpointsHeadlessWithoutPort(t *testing.T) { | ||||
| 		}, | ||||
| 	}) | ||||
| 	addPods(endpoints.podStore, ns, 1, 1, 0, ipv4only) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
|  | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| @@ -1429,7 +1533,9 @@ func TestLastTriggerChangeTimeAnnotation(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1449,7 +1555,10 @@ func TestLastTriggerChangeTimeAnnotation(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| @@ -1476,7 +1585,9 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationOverridden(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1499,7 +1610,10 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationOverridden(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| @@ -1526,7 +1640,9 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationCleared(t *testing.T) { | ||||
| 	ns := "other" | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0*time.Second) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1550,7 +1666,10 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationCleared(t *testing.T) { | ||||
| 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}}, | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
|  | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{ | ||||
| @@ -1671,15 +1790,15 @@ func TestPodUpdatesBatching(t *testing.T) { | ||||
| 			resourceVersion := 1 | ||||
| 			testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 			defer testServer.Close() | ||||
| 			endpoints := newController(testServer.URL, tc.batchPeriod) | ||||
| 			stopCh := make(chan struct{}) | ||||
| 			defer close(stopCh) | ||||
|  | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			endpoints := newController(tCtx, testServer.URL, tc.batchPeriod) | ||||
| 			endpoints.podsSynced = alwaysReady | ||||
| 			endpoints.servicesSynced = alwaysReady | ||||
| 			endpoints.endpointsSynced = alwaysReady | ||||
| 			endpoints.workerLoopPeriod = 10 * time.Millisecond | ||||
|  | ||||
| 			go endpoints.Run(context.TODO(), 1) | ||||
| 			go endpoints.Run(tCtx, 1) | ||||
|  | ||||
| 			addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only) | ||||
|  | ||||
| @@ -1794,15 +1913,15 @@ func TestPodAddsBatching(t *testing.T) { | ||||
| 			ns := "other" | ||||
| 			testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 			defer testServer.Close() | ||||
| 			endpoints := newController(testServer.URL, tc.batchPeriod) | ||||
| 			stopCh := make(chan struct{}) | ||||
| 			defer close(stopCh) | ||||
|  | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			endpoints := newController(tCtx, testServer.URL, tc.batchPeriod) | ||||
| 			endpoints.podsSynced = alwaysReady | ||||
| 			endpoints.servicesSynced = alwaysReady | ||||
| 			endpoints.endpointsSynced = alwaysReady | ||||
| 			endpoints.workerLoopPeriod = 10 * time.Millisecond | ||||
|  | ||||
| 			go endpoints.Run(context.TODO(), 1) | ||||
| 			go endpoints.Run(tCtx, 1) | ||||
|  | ||||
| 			endpoints.serviceStore.Add(&v1.Service{ | ||||
| 				ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, | ||||
| @@ -1916,15 +2035,15 @@ func TestPodDeleteBatching(t *testing.T) { | ||||
| 			ns := "other" | ||||
| 			testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 			defer testServer.Close() | ||||
| 			endpoints := newController(testServer.URL, tc.batchPeriod) | ||||
| 			stopCh := make(chan struct{}) | ||||
| 			defer close(stopCh) | ||||
|  | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			endpoints := newController(tCtx, testServer.URL, tc.batchPeriod) | ||||
| 			endpoints.podsSynced = alwaysReady | ||||
| 			endpoints.servicesSynced = alwaysReady | ||||
| 			endpoints.endpointsSynced = alwaysReady | ||||
| 			endpoints.workerLoopPeriod = 10 * time.Millisecond | ||||
|  | ||||
| 			go endpoints.Run(context.TODO(), 1) | ||||
| 			go endpoints.Run(tCtx, 1) | ||||
|  | ||||
| 			addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only) | ||||
|  | ||||
| @@ -1960,7 +2079,9 @@ func TestSyncEndpointsServiceNotFound(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, endpointsHandler := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	endpoints := newController(testServer.URL, 0) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	endpoints := newController(tCtx, testServer.URL, 0) | ||||
| 	endpoints.endpointsStore.Add(&v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:            "foo", | ||||
| @@ -1968,7 +2089,10 @@ func TestSyncEndpointsServiceNotFound(t *testing.T) { | ||||
| 			ResourceVersion: "1", | ||||
| 		}, | ||||
| 	}) | ||||
| 	endpoints.syncService(context.TODO(), ns+"/foo") | ||||
| 	err := endpoints.syncService(tCtx, ns+"/foo") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error syncing service %v", err) | ||||
| 	} | ||||
| 	endpointsHandler.ValidateRequestCount(t, 1) | ||||
| 	endpointsHandler.ValidateRequest(t, "/api/v1/namespaces/"+ns+"/endpoints/foo", "DELETE", nil) | ||||
| } | ||||
| @@ -2058,8 +2182,9 @@ func TestSyncServiceOverCapacity(t *testing.T) { | ||||
|  | ||||
| 	for _, tc := range testCases { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			ns := "test" | ||||
| 			client, c := newFakeController(0 * time.Second) | ||||
| 			client, c := newFakeController(tCtx, 0*time.Second) | ||||
|  | ||||
| 			addPods(c.podStore, ns, tc.numDesired, 1, tc.numDesiredNotReady, ipv4only) | ||||
| 			pods := c.podStore.List() | ||||
| @@ -2092,11 +2217,17 @@ func TestSyncServiceOverCapacity(t *testing.T) { | ||||
| 				endpoints.Annotations[v1.EndpointsOverCapacity] = *tc.startingAnnotation | ||||
| 			} | ||||
| 			c.endpointsStore.Add(endpoints) | ||||
| 			client.CoreV1().Endpoints(ns).Create(context.TODO(), endpoints, metav1.CreateOptions{}) | ||||
| 			_, err := client.CoreV1().Endpoints(ns).Create(tCtx, endpoints, metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("unexpected error creating endpoints: %v", err) | ||||
| 			} | ||||
|  | ||||
| 			c.syncService(context.TODO(), fmt.Sprintf("%s/%s", ns, svc.Name)) | ||||
| 			err = c.syncService(tCtx, fmt.Sprintf("%s/%s", ns, svc.Name)) | ||||
| 			if err != nil { | ||||
| 				t.Errorf("Unexpected error syncing service %v", err) | ||||
| 			} | ||||
|  | ||||
| 			actualEndpoints, err := client.CoreV1().Endpoints(ns).Get(context.TODO(), endpoints.Name, metav1.GetOptions{}) | ||||
| 			actualEndpoints, err := client.CoreV1().Endpoints(ns).Get(tCtx, endpoints.Name, metav1.GetOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("unexpected error getting endpoints: %v", err) | ||||
| 			} | ||||
| @@ -2250,10 +2381,11 @@ func TestMultipleServiceChanges(t *testing.T) { | ||||
| 	testServer := makeBlockingEndpointDeleteTestServer(t, controller, endpoint, blockDelete, blockNextAction, ns) | ||||
| 	defer testServer.Close() | ||||
|  | ||||
| 	*controller = *newController(testServer.URL, 0*time.Second) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	*controller = *newController(tCtx, testServer.URL, 0*time.Second) | ||||
| 	addPods(controller.podStore, ns, 1, 1, 0, ipv4only) | ||||
|  | ||||
| 	go func() { controller.Run(context.TODO(), 1) }() | ||||
| 	go func() { controller.Run(tCtx, 1) }() | ||||
|  | ||||
| 	svc := &v1.Service{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns}, | ||||
| @@ -2481,8 +2613,10 @@ func TestSyncServiceAddresses(t *testing.T) { | ||||
|  | ||||
| 	for _, tc := range testCases { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			tCtx := ktesting.Init(t) | ||||
|  | ||||
| 			ns := tc.service.Namespace | ||||
| 			client, c := newFakeController(0 * time.Second) | ||||
| 			client, c := newFakeController(tCtx, 0*time.Second) | ||||
|  | ||||
| 			err := c.podStore.Add(tc.pod) | ||||
| 			if err != nil { | ||||
| @@ -2492,12 +2626,12 @@ func TestSyncServiceAddresses(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Errorf("Unexpected error adding service %v", err) | ||||
| 			} | ||||
| 			err = c.syncService(context.TODO(), fmt.Sprintf("%s/%s", ns, tc.service.Name)) | ||||
| 			err = c.syncService(tCtx, fmt.Sprintf("%s/%s", ns, tc.service.Name)) | ||||
| 			if err != nil { | ||||
| 				t.Errorf("Unexpected error syncing service %v", err) | ||||
| 			} | ||||
|  | ||||
| 			endpoints, err := client.CoreV1().Endpoints(ns).Get(context.TODO(), tc.service.Name, metav1.GetOptions{}) | ||||
| 			endpoints, err := client.CoreV1().Endpoints(ns).Get(tCtx, tc.service.Name, metav1.GetOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Errorf("Unexpected error %v", err) | ||||
| 			} | ||||
| @@ -2524,7 +2658,9 @@ func TestEndpointsDeletionEvents(t *testing.T) { | ||||
| 	ns := metav1.NamespaceDefault | ||||
| 	testServer, _ := makeTestServer(t, ns) | ||||
| 	defer testServer.Close() | ||||
| 	controller := newController(testServer.URL, 0) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	controller := newController(tCtx, testServer.URL, 0) | ||||
| 	store := controller.endpointsStore | ||||
| 	ep1 := &v1.Endpoints{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
|   | ||||
@@ -86,7 +86,7 @@ func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
	client clientset.Interface,
	endpointUpdatesBatchPeriod time.Duration,
) *Controller {
	broadcaster := record.NewBroadcaster()
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})

	endpointslicemetrics.RegisterMetrics()

@@ -76,7 +76,7 @@ func NewController(ctx context.Context, endpointsInformer coreinformers.Endpoint
	endpointUpdatesBatchPeriod time.Duration,
) *Controller {
	logger := klog.FromContext(ctx)
	broadcaster := record.NewBroadcaster()
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-mirroring-controller"})

	metrics.RegisterMetrics()

@@ -29,9 +29,9 @@ import (
	"k8s.io/client-go/tools/record"
	"k8s.io/component-base/metrics/testutil"
	endpointsliceutil "k8s.io/endpointslice/util"
	"k8s.io/klog/v2/ktesting"
	endpointsv1 "k8s.io/kubernetes/pkg/api/v1/endpoints"
	"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
	"k8s.io/kubernetes/test/utils/ktesting"
	"k8s.io/utils/pointer"
)

@@ -1007,6 +1007,7 @@ func TestReconcile(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			tCtx := ktesting.Init(t)
			client := newClientset()
			setupMetrics()
			namespace := "test"
@@ -1037,7 +1038,7 @@ func TestReconcile(t *testing.T) {
			if maxEndpointsPerSubset == 0 {
				maxEndpointsPerSubset = defaultMaxEndpointsPerSubset
			}
			r := newReconciler(client, maxEndpointsPerSubset)
			r := newReconciler(tCtx, client, maxEndpointsPerSubset)
			reconcileHelper(t, r, &endpoints, tc.existingEndpointSlices)

			numExtraActions := len(client.Actions()) - numInitialActions
@@ -1057,8 +1058,8 @@ func TestReconcile(t *testing.T) {

// Test Helpers

func newReconciler(client *fake.Clientset, maxEndpointsPerSubset int32) *reconciler {
	broadcaster := record.NewBroadcaster()
func newReconciler(ctx context.Context, client *fake.Clientset, maxEndpointsPerSubset int32) *reconciler {
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "endpoint-slice-mirroring-controller"})

	return &reconciler{

@@ -85,6 +85,7 @@ var _ controller.Debuggable = (*GarbageCollector)(nil)

// NewGarbageCollector creates a new GarbageCollector.
func NewGarbageCollector(
	ctx context.Context,
	kubeClient clientset.Interface,
	metadataClient metadata.Interface,
	mapper meta.ResettableRESTMapper,
@@ -93,7 +94,7 @@ func NewGarbageCollector(
	informersStarted <-chan struct{},
) (*GarbageCollector, error) {

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
	eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "garbage-collector-controller"})

	attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete")
@@ -147,7 +148,7 @@ func (gc *GarbageCollector) Run(ctx context.Context, workers int) {
	defer gc.dependencyGraphBuilder.graphChanges.ShutDown()

	// Start events processing pipeline.
	gc.eventBroadcaster.StartStructuredLogging(0)
	gc.eventBroadcaster.StartStructuredLogging(3)
	gc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gc.kubeClient.CoreV1().Events("")})
	defer gc.eventBroadcaster.Shutdown()

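In the garbage collector tests below, the hand-rolled context.WithCancel plumbing is replaced by the ktesting test context, which is cancelled explicitly when the test finishes. A rough sketch of that lifecycle, with a placeholder worker standing in for gc.Run; this is illustrative, not test code from the PR:

func TestExampleLifecycle(t *testing.T) {
	tCtx := ktesting.Init(t)
	defer tCtx.Cancel("test has completed")

	go func() {
		// A long-running worker (gc.Run in the real tests) exits once the
		// test context is cancelled.
		<-tCtx.Done()
	}()
}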
| @@ -30,7 +30,6 @@ import ( | ||||
| 	"golang.org/x/time/rate" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
|  | ||||
| 	"github.com/golang/groupcache/lru" | ||||
| 	"github.com/google/go-cmp/cmp" | ||||
| @@ -64,6 +63,7 @@ import ( | ||||
| 	"k8s.io/controller-manager/pkg/informerfactory" | ||||
| 	"k8s.io/kubernetes/pkg/api/legacyscheme" | ||||
| 	c "k8s.io/kubernetes/pkg/controller" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| type testRESTMapper struct { | ||||
| @@ -98,15 +98,14 @@ func TestGarbageCollectorConstruction(t *testing.T) { | ||||
| 	// construction will not fail. | ||||
| 	alwaysStarted := make(chan struct{}) | ||||
| 	close(alwaysStarted) | ||||
| 	gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{}, | ||||
| 	logger, tCtx := ktesting.NewTestContext(t) | ||||
| 	gc, err := NewGarbageCollector(tCtx, client, metadataClient, rm, map[schema.GroupResource]struct{}{}, | ||||
| 		informerfactory.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	assert.Equal(t, 0, len(gc.dependencyGraphBuilder.monitors)) | ||||
|  | ||||
| 	logger, _ := ktesting.NewTestContext(t) | ||||
|  | ||||
| 	// Make sure resource monitor syncing creates and stops resource monitors. | ||||
| 	tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil) | ||||
| 	err = gc.resyncMonitors(logger, twoResources) | ||||
| @@ -121,10 +120,7 @@ func TestGarbageCollectorConstruction(t *testing.T) { | ||||
| 	} | ||||
| 	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors)) | ||||
|  | ||||
| 	// Make sure the syncing mechanism also works after Run() has been called | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	go gc.Run(ctx, 1) | ||||
| 	go gc.Run(tCtx, 1) | ||||
|  | ||||
| 	err = gc.resyncMonitors(logger, twoResources) | ||||
| 	if err != nil { | ||||
| @@ -212,6 +208,7 @@ type garbageCollector struct { | ||||
| } | ||||
|  | ||||
| func setupGC(t *testing.T, config *restclient.Config) garbageCollector { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	metadataClient, err := metadata.NewForConfig(config) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| @@ -221,7 +218,7 @@ func setupGC(t *testing.T, config *restclient.Config) garbageCollector { | ||||
| 	sharedInformers := informers.NewSharedInformerFactory(client, 0) | ||||
| 	alwaysStarted := make(chan struct{}) | ||||
| 	close(alwaysStarted) | ||||
| 	gc, err := NewGarbageCollector(client, metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, ignoredResources, sharedInformers, alwaysStarted) | ||||
| 	gc, err := NewGarbageCollector(ctx, client, metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, ignoredResources, sharedInformers, alwaysStarted) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| @@ -886,17 +883,17 @@ func TestGarbageCollectorSync(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	sharedInformers := informers.NewSharedInformerFactory(client, 0) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	alwaysStarted := make(chan struct{}) | ||||
| 	close(alwaysStarted) | ||||
| 	gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted) | ||||
| 	gc, err := NewGarbageCollector(tCtx, client, metadataClient, rm, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	go gc.Run(ctx, 1) | ||||
| 	go gc.Run(tCtx, 1) | ||||
| 	// The pseudo-code of GarbageCollector.Sync(): | ||||
| 	// GarbageCollector.Sync(client, period, stopCh): | ||||
| 	//    wait.Until() loops with `period` until the `stopCh` is closed : | ||||
| @@ -911,7 +908,7 @@ func TestGarbageCollectorSync(t *testing.T) { | ||||
| 	// The 1s sleep in the test allows GetDeletableResources and | ||||
| 	// gc.resyncMonitors to run ~5 times to ensure the changes to the | ||||
| 	// fakeDiscoveryClient are picked up. | ||||
| 	go gc.Sync(ctx, fakeDiscoveryClient, 200*time.Millisecond) | ||||
| 	go gc.Sync(tCtx, fakeDiscoveryClient, 200*time.Millisecond) | ||||
|  | ||||
| 	// Wait until the sync discovers the initial resources | ||||
| 	time.Sleep(1 * time.Second) | ||||
|   | ||||
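On the test side, the hunks above replace k8s.io/klog/v2/ktesting's NewTestContext with the richer k8s.io/kubernetes/test/utils/ktesting helper, whose TContext is a context.Context carrying a per-test logger and a Cancel method. A rough usage sketch, relying only on the calls visible in the diff (Init, Cancel, Logger):

	package example

	import (
		"testing"

		"k8s.io/kubernetes/test/utils/ktesting"
	)

	func TestSketch(t *testing.T) {
		// tCtx satisfies context.Context, so it can be handed directly to
		// constructors such as NewGarbageCollector and to Run/Sync methods.
		tCtx := ktesting.Init(t)
		defer tCtx.Cancel("test has completed")

		// The per-test logger replaces the separate logger value previously
		// returned by klog/v2/ktesting.NewTestContext.
		tCtx.Logger().Info("running with a per-test contextual logger")

		if tCtx.Err() != nil {
			t.Fatalf("context should not be canceled yet: %v", tCtx.Err())
		}
	}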
| @@ -148,7 +148,7 @@ func NewController(ctx context.Context, podInformer coreinformers.PodInformer, j | ||||
| } | ||||
|  | ||||
| func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface, clock clock.WithTicker) (*Controller, error) { | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	logger := klog.FromContext(ctx) | ||||
|  | ||||
| 	jm := &Controller{ | ||||
| @@ -214,7 +214,7 @@ func (jm *Controller) Run(ctx context.Context, workers int) { | ||||
| 	logger := klog.FromContext(ctx) | ||||
|  | ||||
| 	// Start events processing pipeline. | ||||
| 	jm.broadcaster.StartStructuredLogging(0) | ||||
| 	jm.broadcaster.StartStructuredLogging(3) | ||||
| 	jm.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: jm.kubeClient.CoreV1().Events("")}) | ||||
| 	defer jm.broadcaster.Shutdown() | ||||
|  | ||||
|   | ||||
| @@ -48,8 +48,8 @@ type adapter struct { | ||||
| 	recorder    record.EventRecorder | ||||
| } | ||||
|  | ||||
| func newAdapter(k8s clientset.Interface, cloud *gce.Cloud) *adapter { | ||||
| 	broadcaster := record.NewBroadcaster() | ||||
| func newAdapter(ctx context.Context, k8s clientset.Interface, cloud *gce.Cloud) *adapter { | ||||
| 	broadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
|  | ||||
| 	ret := &adapter{ | ||||
| 		k8s:         k8s, | ||||
| @@ -65,7 +65,7 @@ func (a *adapter) Run(ctx context.Context) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
|  | ||||
| 	// Start event processing pipeline. | ||||
| 	a.broadcaster.StartStructuredLogging(0) | ||||
| 	a.broadcaster.StartStructuredLogging(3) | ||||
| 	a.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: a.k8s.CoreV1().Events("")}) | ||||
| 	defer a.broadcaster.Shutdown() | ||||
|  | ||||
|   | ||||
| @@ -124,9 +124,9 @@ func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovide | ||||
|  | ||||
| 	switch allocatorType { | ||||
| 	case RangeAllocatorType: | ||||
| 		return NewCIDRRangeAllocator(logger, kubeClient, nodeInformer, allocatorParams, nodeList) | ||||
| 		return NewCIDRRangeAllocator(ctx, kubeClient, nodeInformer, allocatorParams, nodeList) | ||||
| 	case CloudAllocatorType: | ||||
| 		return NewCloudCIDRAllocator(logger, kubeClient, cloud, nodeInformer) | ||||
| 		return NewCloudCIDRAllocator(ctx, kubeClient, cloud, nodeInformer) | ||||
| 	default: | ||||
| 		return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType) | ||||
| 	} | ||||
|   | ||||
| @@ -87,13 +87,14 @@ type cloudCIDRAllocator struct { | ||||
| var _ CIDRAllocator = (*cloudCIDRAllocator)(nil) | ||||
|  | ||||
| // NewCloudCIDRAllocator creates a new cloud CIDR allocator. | ||||
| func NewCloudCIDRAllocator(logger klog.Logger, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) { | ||||
| func NewCloudCIDRAllocator(ctx context.Context, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) { | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	if client == nil { | ||||
| 		logger.Error(nil, "kubeClient is nil when starting cloud CIDR allocator") | ||||
| 		klog.FlushAndExit(klog.ExitFlushTimeout, 1) | ||||
| 	} | ||||
|  | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) | ||||
|  | ||||
| 	gceCloud, ok := cloud.(*gce.Cloud) | ||||
| @@ -143,7 +144,7 @@ func (ca *cloudCIDRAllocator) Run(ctx context.Context) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
|  | ||||
| 	// Start event processing pipeline. | ||||
| 	ca.broadcaster.StartStructuredLogging(0) | ||||
| 	ca.broadcaster.StartStructuredLogging(3) | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	logger.Info("Sending events to api server") | ||||
| 	ca.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ca.client.CoreV1().Events("")}) | ||||
|   | ||||
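The constructor change above shows the second recurring pattern: NewCloudCIDRAllocator, like the other constructors in this PR, now takes a context.Context instead of a klog.Logger and derives both its logger and its event broadcaster from it. A condensed sketch of that shape; SomeController and its fields are placeholders, not code from this PR:

	package example

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		clientset "k8s.io/client-go/kubernetes"
		"k8s.io/client-go/kubernetes/scheme"
		"k8s.io/client-go/tools/record"
		"k8s.io/klog/v2"
	)

	// SomeController stands in for any of the controllers touched by this PR.
	type SomeController struct {
		client   clientset.Interface
		recorder record.EventRecorder
	}

	// NewSomeController mirrors the new constructor shape: ctx comes first,
	// the contextual logger is derived from it, and the broadcaster inherits it.
	func NewSomeController(ctx context.Context, client clientset.Interface) (*SomeController, error) {
		logger := klog.FromContext(ctx)
		if client == nil {
			logger.Error(nil, "kubeClient is nil when starting the controller")
			klog.FlushAndExit(klog.ExitFlushTimeout, 1)
		}
		broadcaster := record.NewBroadcaster(record.WithContext(ctx))
		return &SomeController{
			client:   client,
			recorder: broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "some-controller"}),
		}, nil
	}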
| @@ -20,15 +20,15 @@ limitations under the License. | ||||
| package ipam | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
|  | ||||
| 	informers "k8s.io/client-go/informers/core/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	cloudprovider "k8s.io/cloud-provider" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // NewCloudCIDRAllocator creates a new cloud CIDR allocator. | ||||
| func NewCloudCIDRAllocator(logger klog.Logger, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) { | ||||
| func NewCloudCIDRAllocator(ctx context.Context, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) { | ||||
| 	return nil, errors.New("legacy cloud provider support not built") | ||||
| } | ||||
|   | ||||
| @@ -67,6 +67,7 @@ type Controller struct { | ||||
|  | ||||
| // NewController returns a new instance of the IPAM controller. | ||||
| func NewController( | ||||
| 	ctx context.Context, | ||||
| 	config *Config, | ||||
| 	kubeClient clientset.Interface, | ||||
| 	cloud cloudprovider.Interface, | ||||
| @@ -89,7 +90,7 @@ func NewController( | ||||
|  | ||||
| 	c := &Controller{ | ||||
| 		config:  config, | ||||
| 		adapter: newAdapter(kubeClient, gceCloud), | ||||
| 		adapter: newAdapter(ctx, kubeClient, gceCloud), | ||||
| 		syncers: make(map[string]*nodesync.NodeSync), | ||||
| 		set:     set, | ||||
| 	} | ||||
|   | ||||
| @@ -67,13 +67,14 @@ type rangeAllocator struct { | ||||
| // Caller must always pass in a list of existing nodes so the new allocator | ||||
| // can initialize its CIDR map. NodeList is only nil in testing. | ||||
| // Caller must ensure that ClusterCIDRs are semantically correct, e.g. (1 for non-DualStack, 2 for DualStack, etc.). | ||||
| func NewCIDRRangeAllocator(logger klog.Logger, client clientset.Interface, nodeInformer informers.NodeInformer, allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList) (CIDRAllocator, error) { | ||||
| func NewCIDRRangeAllocator(ctx context.Context, client clientset.Interface, nodeInformer informers.NodeInformer, allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList) (CIDRAllocator, error) { | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	if client == nil { | ||||
| 		logger.Error(nil, "kubeClient is nil when starting CIDRRangeAllocator") | ||||
| 		klog.FlushAndExit(klog.ExitFlushTimeout, 1) | ||||
| 	} | ||||
|  | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) | ||||
|  | ||||
| 	// create a cidrSet for each cidr we operate on | ||||
| @@ -169,7 +170,7 @@ func (r *rangeAllocator) Run(ctx context.Context) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
|  | ||||
| 	// Start event processing pipeline. | ||||
| 	r.broadcaster.StartStructuredLogging(0) | ||||
| 	r.broadcaster.StartStructuredLogging(3) | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	logger.Info("Sending events to api server") | ||||
| 	r.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: r.client.CoreV1().Events("")}) | ||||
|   | ||||
| @@ -17,7 +17,6 @@ limitations under the License. | ||||
| package ipam | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| @@ -26,9 +25,9 @@ import ( | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	"k8s.io/client-go/kubernetes/fake" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" | ||||
| 	"k8s.io/kubernetes/pkg/controller/testutil" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	netutils "k8s.io/utils/net" | ||||
| ) | ||||
|  | ||||
| @@ -275,13 +274,13 @@ func TestOccupyPreExistingCIDR(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	// test function | ||||
| 	logger, _ := ktesting.NewTestContext(t) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	for _, tc := range testCases { | ||||
| 		t.Run(tc.description, func(t *testing.T) { | ||||
| 			// Initialize the range allocator. | ||||
| 			fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) | ||||
| 			nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) | ||||
| 			_, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) | ||||
| 			nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{}) | ||||
| 			_, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) | ||||
| 			if err == nil && tc.ctrlCreateFail { | ||||
| 				t.Fatalf("creating range allocator was expected to fail, but it did not") | ||||
| 			} | ||||
| @@ -510,12 +509,12 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	// test function | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	logger, tCtx := ktesting.NewTestContext(t) | ||||
| 	testFunc := func(tc testCase) { | ||||
| 		fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) | ||||
| 		nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) | ||||
| 		nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{}) | ||||
| 		// Initialize the range allocator. | ||||
| 		allocator, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) | ||||
| 		allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) | ||||
| 		if err != nil { | ||||
| 			t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) | ||||
| 			return | ||||
| @@ -527,7 +526,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { | ||||
| 		} | ||||
| 		rangeAllocator.nodesSynced = test.AlwaysReady | ||||
| 		rangeAllocator.recorder = testutil.NewFakeRecorder() | ||||
| 		go allocator.Run(ctx) | ||||
| 		go allocator.Run(tCtx) | ||||
|  | ||||
| 		// this is a bit of white box testing | ||||
| 		// pre allocate the cidrs as per the test | ||||
| @@ -611,10 +610,10 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	logger, tCtx := ktesting.NewTestContext(t) | ||||
| 	testFunc := func(tc testCase) { | ||||
| 		// Initialize the range allocator. | ||||
| 		allocator, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) | ||||
| 		allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) | ||||
| 		if err != nil { | ||||
| 			t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) | ||||
| 		} | ||||
| @@ -625,7 +624,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) { | ||||
| 		} | ||||
| 		rangeAllocator.nodesSynced = test.AlwaysReady | ||||
| 		rangeAllocator.recorder = testutil.NewFakeRecorder() | ||||
| 		go allocator.Run(ctx) | ||||
| 		go allocator.Run(tCtx) | ||||
|  | ||||
| 		// this is a bit of white box testing | ||||
| 		for setIdx, allocatedList := range tc.allocatedCIDRs { | ||||
| @@ -756,10 +755,10 @@ func TestReleaseCIDRSuccess(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	logger, tCtx := ktesting.NewTestContext(t) | ||||
| 	testFunc := func(tc releaseTestCase) { | ||||
| 		// Initialize the range allocator. | ||||
| 		allocator, _ := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) | ||||
| 		allocator, _ := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) | ||||
| 		rangeAllocator, ok := allocator.(*rangeAllocator) | ||||
| 		if !ok { | ||||
| 			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) | ||||
| @@ -767,7 +766,7 @@ func TestReleaseCIDRSuccess(t *testing.T) { | ||||
| 		} | ||||
| 		rangeAllocator.nodesSynced = test.AlwaysReady | ||||
| 		rangeAllocator.recorder = testutil.NewFakeRecorder() | ||||
| 		go allocator.Run(ctx) | ||||
| 		go allocator.Run(tCtx) | ||||
|  | ||||
| 		// this is a bit of white box testing | ||||
| 		for setIdx, allocatedList := range tc.allocatedCIDRs { | ||||
|   | ||||
| @@ -20,6 +20,7 @@ limitations under the License. | ||||
| package nodeipam | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
|  | ||||
| @@ -33,7 +34,7 @@ import ( | ||||
| ) | ||||
|  | ||||
| func createLegacyIPAM( | ||||
| 	logger klog.Logger, | ||||
| 	ctx context.Context, | ||||
| 	ic *Controller, | ||||
| 	nodeInformer coreinformers.NodeInformer, | ||||
| 	cloud cloudprovider.Interface, | ||||
| @@ -59,10 +60,11 @@ func createLegacyIPAM( | ||||
| 	if len(clusterCIDRs) > 0 { | ||||
| 		cidr = clusterCIDRs[0] | ||||
| 	} | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	if len(clusterCIDRs) > 1 { | ||||
| 		logger.Info("Multiple cidrs were configured with FromCluster or FromCloud. cidrs except first one were discarded") | ||||
| 	} | ||||
| 	ipamc, err := ipam.NewController(cfg, kubeClient, cloud, cidr, serviceCIDR, nodeCIDRMaskSizes[0]) | ||||
| 	ipamc, err := ipam.NewController(ctx, cfg, kubeClient, cloud, cidr, serviceCIDR, nodeCIDRMaskSizes[0]) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("error creating ipam controller: %w", err) | ||||
| 	} | ||||
|   | ||||
| @@ -88,7 +88,6 @@ func NewNodeIpamController( | ||||
| 	nodeCIDRMaskSizes []int, | ||||
| 	allocatorType ipam.CIDRAllocatorType) (*Controller, error) { | ||||
|  | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	if kubeClient == nil { | ||||
| 		return nil, fmt.Errorf("kubeClient is nil when starting Controller") | ||||
| 	} | ||||
| @@ -110,7 +109,7 @@ func NewNodeIpamController( | ||||
| 	ic := &Controller{ | ||||
| 		cloud:                cloud, | ||||
| 		kubeClient:           kubeClient, | ||||
| 		eventBroadcaster:     record.NewBroadcaster(), | ||||
| 		eventBroadcaster:     record.NewBroadcaster(record.WithContext(ctx)), | ||||
| 		clusterCIDRs:         clusterCIDRs, | ||||
| 		serviceCIDR:          serviceCIDR, | ||||
| 		secondaryServiceCIDR: secondaryServiceCIDR, | ||||
| @@ -120,7 +119,7 @@ func NewNodeIpamController( | ||||
| 	// TODO: Abstract this check into a generic controller manager should run method. | ||||
| 	if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType { | ||||
| 		var err error | ||||
| 		ic.legacyIPAM, err = createLegacyIPAM(logger, ic, nodeInformer, cloud, kubeClient, clusterCIDRs, serviceCIDR, nodeCIDRMaskSizes) | ||||
| 		ic.legacyIPAM, err = createLegacyIPAM(ctx, ic, nodeInformer, cloud, kubeClient, clusterCIDRs, serviceCIDR, nodeCIDRMaskSizes) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| @@ -151,7 +150,7 @@ func (nc *Controller) Run(ctx context.Context) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
|  | ||||
| 	// Start event processing pipeline. | ||||
| 	nc.eventBroadcaster.StartStructuredLogging(0) | ||||
| 	nc.eventBroadcaster.StartStructuredLogging(3) | ||||
| 	nc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: nc.kubeClient.CoreV1().Events("")}) | ||||
| 	defer nc.eventBroadcaster.Shutdown() | ||||
| 	klog.FromContext(ctx).Info("Starting ipam controller") | ||||
|   | ||||
| @@ -27,7 +27,6 @@ import ( | ||||
| 	coreinformers "k8s.io/client-go/informers/core/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	cloudprovider "k8s.io/cloud-provider" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| type fakeController struct { | ||||
| @@ -38,7 +37,7 @@ func (f *fakeController) Run(ctx context.Context) { | ||||
| } | ||||
|  | ||||
| func createLegacyIPAM( | ||||
| 	logger klog.Logger, | ||||
| 	ctx context.Context, | ||||
| 	ic *Controller, | ||||
| 	nodeInformer coreinformers.NodeInformer, | ||||
| 	cloud cloudprovider.Interface, | ||||
|   | ||||
| @@ -323,7 +323,7 @@ func NewNodeLifecycleController( | ||||
| 		klog.FlushAndExit(klog.ExitFlushTimeout, 1) | ||||
| 	} | ||||
|  | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"}) | ||||
|  | ||||
| 	nc := &Controller{ | ||||
| @@ -454,7 +454,7 @@ func (nc *Controller) Run(ctx context.Context) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
|  | ||||
| 	// Start events processing pipeline. | ||||
| 	nc.broadcaster.StartStructuredLogging(0) | ||||
| 	nc.broadcaster.StartStructuredLogging(3) | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	logger.Info("Sending events to api server") | ||||
| 	nc.broadcaster.StartRecordingToSink( | ||||
|   | ||||
| @@ -126,6 +126,7 @@ type HorizontalController struct { | ||||
|  | ||||
| // NewHorizontalController creates a new HorizontalController. | ||||
| func NewHorizontalController( | ||||
| 	ctx context.Context, | ||||
| 	evtNamespacer v1core.EventsGetter, | ||||
| 	scaleNamespacer scaleclient.ScalesGetter, | ||||
| 	hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter, | ||||
| @@ -140,8 +141,8 @@ func NewHorizontalController( | ||||
| 	delayOfInitialReadinessStatus time.Duration, | ||||
| 	containerResourceMetricsEnabled bool, | ||||
| ) *HorizontalController { | ||||
| 	broadcaster := record.NewBroadcaster() | ||||
| 	broadcaster.StartStructuredLogging(0) | ||||
| 	broadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	broadcaster.StartStructuredLogging(3) | ||||
| 	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")}) | ||||
| 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"}) | ||||
|  | ||||
|   | ||||
| @@ -46,6 +46,7 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" | ||||
| 	"k8s.io/kubernetes/pkg/controller/podautoscaler/monitor" | ||||
| 	"k8s.io/kubernetes/pkg/controller/util/selectors" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2" | ||||
| 	emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" | ||||
| 	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1" | ||||
| @@ -767,7 +768,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform | ||||
| 	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc()) | ||||
| 	defaultDownscalestabilizationWindow := 5 * time.Minute | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	hpaController := NewHorizontalController( | ||||
| 		tCtx, | ||||
| 		eventClient.CoreV1(), | ||||
| 		testScaleClient, | ||||
| 		testClient.AutoscalingV2(), | ||||
| @@ -5292,7 +5295,9 @@ func TestMultipleHPAs(t *testing.T) { | ||||
|  | ||||
| 	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc()) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	hpaController := NewHorizontalController( | ||||
| 		tCtx, | ||||
| 		testClient.CoreV1(), | ||||
| 		testScaleClient, | ||||
| 		testClient.AutoscalingV2(), | ||||
| @@ -5310,10 +5315,8 @@ func TestMultipleHPAs(t *testing.T) { | ||||
| 	hpaController.scaleUpEvents = scaleUpEventsMap | ||||
| 	hpaController.scaleDownEvents = scaleDownEventsMap | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	informerFactory.Start(ctx.Done()) | ||||
| 	go hpaController.Run(ctx, 5) | ||||
| 	informerFactory.Start(tCtx.Done()) | ||||
| 	go hpaController.Run(tCtx, 5) | ||||
|  | ||||
| 	timeoutTime := time.After(15 * time.Second) | ||||
| 	timeout := false | ||||
|   | ||||
| @@ -1,25 +0,0 @@ | ||||
| /* | ||||
| Copyright 2019 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package replicaset | ||||
|  | ||||
| import ( | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| func init() { | ||||
| 	klog.InitFlags(nil) | ||||
| } | ||||
| @@ -115,8 +115,9 @@ type ReplicaSetController struct { | ||||
| } | ||||
|  | ||||
| // NewReplicaSetController configures a replica set controller with the specified event recorder | ||||
| func NewReplicaSetController(logger klog.Logger, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController { | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| func NewReplicaSetController(ctx context.Context, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController { | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	if err := metrics.Register(legacyregistry.Register); err != nil { | ||||
| 		logger.Error(err, "unable to register metrics") | ||||
| 	} | ||||
| @@ -202,7 +203,7 @@ func (rsc *ReplicaSetController) Run(ctx context.Context, workers int) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
|  | ||||
| 	// Start events processing pipeline. | ||||
| 	rsc.eventBroadcaster.StartStructuredLogging(0) | ||||
| 	rsc.eventBroadcaster.StartStructuredLogging(3) | ||||
| 	rsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: rsc.kubeClient.CoreV1().Events("")}) | ||||
| 	defer rsc.eventBroadcaster.Shutdown() | ||||
|  | ||||
|   | ||||
| @@ -50,10 +50,10 @@ import ( | ||||
| 	utiltesting "k8s.io/client-go/util/testing" | ||||
| 	"k8s.io/client-go/util/workqueue" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	"k8s.io/kubernetes/pkg/controller" | ||||
| 	. "k8s.io/kubernetes/pkg/controller/testutil" | ||||
| 	"k8s.io/kubernetes/pkg/securitycontext" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	"k8s.io/utils/pointer" | ||||
| ) | ||||
|  | ||||
| @@ -64,9 +64,9 @@ var ( | ||||
| func testNewReplicaSetControllerFromClient(tb testing.TB, client clientset.Interface, stopCh chan struct{}, burstReplicas int) (*ReplicaSetController, informers.SharedInformerFactory) { | ||||
| 	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) | ||||
|  | ||||
| 	logger, _ := ktesting.NewTestContext(tb) | ||||
| 	tCtx := ktesting.Init(tb) | ||||
| 	ret := NewReplicaSetController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		informers.Apps().V1().ReplicaSets(), | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		client, | ||||
| @@ -628,9 +628,9 @@ func TestWatchControllers(t *testing.T) { | ||||
| 	stopCh := make(chan struct{}) | ||||
| 	defer close(stopCh) | ||||
| 	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	manager := NewReplicaSetController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		informers.Apps().V1().ReplicaSets(), | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		client, | ||||
| @@ -659,7 +659,7 @@ func TestWatchControllers(t *testing.T) { | ||||
| 	} | ||||
| 	// Start only the ReplicaSet watcher and the workqueue, send a watch event, | ||||
| 	// and make sure it hits the sync method. | ||||
| 	go wait.UntilWithContext(ctx, manager.worker, 10*time.Millisecond) | ||||
| 	go wait.UntilWithContext(tCtx, manager.worker, 10*time.Millisecond) | ||||
|  | ||||
| 	testRSSpec.Name = "foo" | ||||
| 	fakeWatch.Add(&testRSSpec) | ||||
| @@ -1189,15 +1189,15 @@ func TestDeleteControllerAndExpectations(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestExpectationsOnRecreate(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	client := fake.NewSimpleClientset() | ||||
| 	stopCh := make(chan struct{}) | ||||
| 	defer close(stopCh) | ||||
|  | ||||
| 	f := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) | ||||
| 	logger, _ := ktesting.NewTestContext(t) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	logger := tCtx.Logger() | ||||
| 	manager := NewReplicaSetController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		f.Apps().V1().ReplicaSets(), | ||||
| 		f.Core().V1().Pods(), | ||||
| 		client, | ||||
| @@ -1213,7 +1213,7 @@ func TestExpectationsOnRecreate(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	oldRS := newReplicaSet(1, map[string]string{"foo": "bar"}) | ||||
| 	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(ctx, oldRS, metav1.CreateOptions{}) | ||||
| 	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(tCtx, oldRS, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| @@ -1226,7 +1226,7 @@ func TestExpectationsOnRecreate(t *testing.T) { | ||||
| 		t.Fatalf("initial RS didn't result in new item in the queue: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	ok := manager.processNextWorkItem(ctx) | ||||
| 	ok := manager.processNextWorkItem(tCtx) | ||||
| 	if !ok { | ||||
| 		t.Fatal("queue is shutting down") | ||||
| 	} | ||||
| @@ -1257,7 +1257,7 @@ func TestExpectationsOnRecreate(t *testing.T) { | ||||
| 		t.Fatal("Unexpected item in the queue") | ||||
| 	} | ||||
|  | ||||
| 	err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(ctx, oldRS.Name, metav1.DeleteOptions{}) | ||||
| 	err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(tCtx, oldRS.Name, metav1.DeleteOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| @@ -1294,7 +1294,7 @@ func TestExpectationsOnRecreate(t *testing.T) { | ||||
|  | ||||
| 	newRS := oldRS.DeepCopy() | ||||
| 	newRS.UID = uuid.NewUUID() | ||||
| 	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(ctx, newRS, metav1.CreateOptions{}) | ||||
| 	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(tCtx, newRS, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| @@ -1312,7 +1312,7 @@ func TestExpectationsOnRecreate(t *testing.T) { | ||||
| 		t.Fatalf("Re-creating RS didn't result in new item in the queue: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	ok = manager.processNextWorkItem(ctx) | ||||
| 	ok = manager.processNextWorkItem(tCtx) | ||||
| 	if !ok { | ||||
| 		t.Fatal("Queue is shutting down!") | ||||
| 	} | ||||
|   | ||||
| @@ -26,6 +26,8 @@ limitations under the License. | ||||
| package replication | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	coreinformers "k8s.io/client-go/informers/core/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| @@ -48,8 +50,9 @@ type ReplicationManager struct { | ||||
| } | ||||
|  | ||||
| // NewReplicationManager configures a replication manager with the specified event recorder | ||||
| func NewReplicationManager(logger klog.Logger, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager { | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| func NewReplicationManager(ctx context.Context, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager { | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	return &ReplicationManager{ | ||||
| 		*replicaset.NewBaseController(logger, informerAdapter{rcInformer}, podInformer, clientsetAdapter{kubeClient}, burstReplicas, | ||||
| 			v1.SchemeGroupVersion.WithKind("ReplicationController"), | ||||
|   | ||||
| @@ -394,7 +394,7 @@ func (ec *Controller) Run(ctx context.Context, workers int) { | ||||
| 	logger.Info("Starting ephemeral volume controller") | ||||
| 	defer logger.Info("Shutting down ephemeral volume controller") | ||||
|  | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	eventBroadcaster.StartLogging(klog.Infof) | ||||
| 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ec.kubeClient.CoreV1().Events("")}) | ||||
| 	ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "resource_claim"}) | ||||
|   | ||||
| @@ -67,11 +67,12 @@ const ( | ||||
|  | ||||
| // NewController returns a new *Controller. | ||||
| func NewController( | ||||
| 	ctx context.Context, | ||||
| 	serviceCIDRInformer networkinginformers.ServiceCIDRInformer, | ||||
| 	ipAddressInformer networkinginformers.IPAddressInformer, | ||||
| 	client clientset.Interface, | ||||
| ) *Controller { | ||||
| 	broadcaster := record.NewBroadcaster() | ||||
| 	broadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName}) | ||||
| 	c := &Controller{ | ||||
| 		client:           client, | ||||
| @@ -129,7 +130,7 @@ func (c *Controller) Run(ctx context.Context, workers int) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
| 	defer c.queue.ShutDown() | ||||
|  | ||||
| 	c.eventBroadcaster.StartStructuredLogging(0) | ||||
| 	c.eventBroadcaster.StartStructuredLogging(3) | ||||
| 	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")}) | ||||
| 	defer c.eventBroadcaster.Shutdown() | ||||
|  | ||||
|   | ||||
| @@ -33,6 +33,7 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/controller" | ||||
| 	"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr" | ||||
| 	"k8s.io/kubernetes/pkg/registry/core/service/ipallocator" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	netutils "k8s.io/utils/net" | ||||
| 	"k8s.io/utils/ptr" | ||||
| ) | ||||
| @@ -43,7 +44,7 @@ type testController struct { | ||||
| 	ipaddressesStore  cache.Store | ||||
| } | ||||
|  | ||||
| func newController(t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips []*networkingapiv1alpha1.IPAddress) (*fake.Clientset, *testController) { | ||||
| func newController(ctx context.Context, t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips []*networkingapiv1alpha1.IPAddress) (*fake.Clientset, *testController) { | ||||
| 	client := fake.NewSimpleClientset() | ||||
|  | ||||
| 	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) | ||||
| @@ -65,6 +66,7 @@ func newController(t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips | ||||
| 		} | ||||
| 	} | ||||
| 	controller := NewController( | ||||
| 		ctx, | ||||
| 		serviceCIDRInformer, | ||||
| 		ipAddressInformer, | ||||
| 		client) | ||||
| @@ -233,11 +235,12 @@ func TestControllerSync(t *testing.T) { | ||||
|  | ||||
| 	for _, tc := range testCases { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			client, controller := newController(t, tc.cidrs, tc.ips) | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			client, controller := newController(tCtx, t, tc.cidrs, tc.ips) | ||||
| 			// server side apply does not play well with fake client go | ||||
| 			// so we skip the errors and only assert on the actions | ||||
| 			// https://github.com/kubernetes/kubernetes/issues/99953 | ||||
| 			_ = controller.sync(context.Background(), tc.cidrSynced) | ||||
| 			_ = controller.sync(tCtx, tc.cidrSynced) | ||||
| 			expectAction(t, client.Actions(), tc.actions) | ||||
|  | ||||
| 		}) | ||||
| @@ -423,13 +426,14 @@ func TestController_canDeleteCIDR(t *testing.T) { | ||||
| 	} | ||||
| 	for _, tc := range tests { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			_, controller := newController(t, tc.cidrs, tc.ips) | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			_, controller := newController(tCtx, t, tc.cidrs, tc.ips) | ||||
| 			err := controller.syncCIDRs() | ||||
| 			if err != nil { | ||||
| 				t.Fatal(err) | ||||
| 			} | ||||
|  | ||||
| 			got, err := controller.canDeleteCIDR(context.Background(), tc.cidrSynced) | ||||
| 			got, err := controller.canDeleteCIDR(tCtx, tc.cidrSynced) | ||||
| 			if err != nil { | ||||
| 				t.Fatal(err) | ||||
| 			} | ||||
| @@ -528,7 +532,8 @@ func TestController_ipToCidrs(t *testing.T) { | ||||
| 		}} | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			_, controller := newController(t, tt.cidrs, nil) | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			_, controller := newController(tCtx, t, tt.cidrs, nil) | ||||
| 			err := controller.syncCIDRs() | ||||
| 			if err != nil { | ||||
| 				t.Fatal(err) | ||||
| @@ -584,7 +589,8 @@ func TestController_cidrToCidrs(t *testing.T) { | ||||
| 		}} | ||||
| 	for _, tt := range tests { | ||||
| 		t.Run(tt.name, func(t *testing.T) { | ||||
| 			_, controller := newController(t, tt.cidrs, nil) | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			_, controller := newController(tCtx, t, tt.cidrs, nil) | ||||
| 			err := controller.syncCIDRs() | ||||
| 			if err != nil { | ||||
| 				t.Fatal(err) | ||||
|   | ||||
| @@ -86,7 +86,7 @@ func NewStatefulSetController( | ||||
| 	kubeClient clientset.Interface, | ||||
| ) *StatefulSetController { | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"}) | ||||
| 	ssc := &StatefulSetController{ | ||||
| 		kubeClient: kubeClient, | ||||
| @@ -151,7 +151,7 @@ func (ssc *StatefulSetController) Run(ctx context.Context, workers int) { | ||||
| 	defer utilruntime.HandleCrash() | ||||
|  | ||||
| 	// Start events processing pipeline. | ||||
| 	ssc.eventBroadcaster.StartStructuredLogging(0) | ||||
| 	ssc.eventBroadcaster.StartStructuredLogging(3) | ||||
| 	ssc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ssc.kubeClient.CoreV1().Events("")}) | ||||
| 	defer ssc.eventBroadcaster.Shutdown() | ||||
|  | ||||
|   | ||||
| @@ -188,7 +188,7 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration { | ||||
| func New(ctx context.Context, c clientset.Interface, podInformer corev1informers.PodInformer, nodeInformer corev1informers.NodeInformer, controllerName string) (*Controller, error) { | ||||
| 	logger := klog.FromContext(ctx) | ||||
| 	metrics.Register() | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName}) | ||||
|  | ||||
| 	podIndexer := podInformer.Informer().GetIndexer() | ||||
| @@ -286,7 +286,7 @@ func (tc *Controller) Run(ctx context.Context) { | ||||
| 	defer logger.Info("Shutting down controller", "controller", tc.name) | ||||
|  | ||||
| 	// Start events processing pipeline. | ||||
| 	tc.broadcaster.StartStructuredLogging(0) | ||||
| 	tc.broadcaster.StartStructuredLogging(3) | ||||
| 	if tc.client != nil { | ||||
| 		logger.Info("Sending events to api server") | ||||
| 		tc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: tc.client.CoreV1().Events("")}) | ||||
|   | ||||
| @@ -70,8 +70,8 @@ type Controller struct { | ||||
|  | ||||
| // New creates an instance of Controller | ||||
| func New(ctx context.Context, jobInformer batchinformers.JobInformer, client clientset.Interface) *Controller { | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster.StartStructuredLogging(0) | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	eventBroadcaster.StartStructuredLogging(3) | ||||
| 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) | ||||
|  | ||||
| 	metrics.Register() | ||||
|   | ||||
| @@ -106,7 +106,7 @@ type AttachDetachController interface { | ||||
|  | ||||
| // NewAttachDetachController returns a new instance of AttachDetachController. | ||||
| func NewAttachDetachController( | ||||
| 	logger klog.Logger, | ||||
| 	ctx context.Context, | ||||
| 	kubeClient clientset.Interface, | ||||
| 	podInformer coreinformers.PodInformer, | ||||
| 	nodeInformer coreinformers.NodeInformer, | ||||
| @@ -123,6 +123,8 @@ func NewAttachDetachController( | ||||
| 	disableForceDetachOnTimeout bool, | ||||
| 	timerConfig TimerConfig) (AttachDetachController, error) { | ||||
|  | ||||
| 	logger := klog.FromContext(ctx) | ||||
|  | ||||
| 	adc := &attachDetachController{ | ||||
| 		kubeClient:  kubeClient, | ||||
| 		pvcLister:   pvcInformer.Lister(), | ||||
| @@ -151,7 +153,7 @@ func NewAttachDetachController( | ||||
| 		return nil, fmt.Errorf("could not initialize volume plugins for Attach/Detach Controller: %w", err) | ||||
| 	} | ||||
|  | ||||
| 	adc.broadcaster = record.NewBroadcaster() | ||||
| 	adc.broadcaster = record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	recorder := adc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"}) | ||||
| 	blkutil := volumepathhandler.NewBlockVolumePathHandler() | ||||
|  | ||||
| @@ -332,7 +334,7 @@ func (adc *attachDetachController) Run(ctx context.Context) { | ||||
| 	defer adc.pvcQueue.ShutDown() | ||||
|  | ||||
| 	// Start events processing pipeline. | ||||
| 	adc.broadcaster.StartStructuredLogging(0) | ||||
| 	adc.broadcaster.StartStructuredLogging(3) | ||||
| 	adc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: adc.kubeClient.CoreV1().Events("")}) | ||||
| 	defer adc.broadcaster.Shutdown() | ||||
|  | ||||
|   | ||||
| @@ -30,13 +30,13 @@ import ( | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	"k8s.io/client-go/kubernetes/fake" | ||||
| 	kcache "k8s.io/client-go/tools/cache" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	"k8s.io/kubernetes/pkg/controller" | ||||
| 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" | ||||
| 	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" | ||||
| 	"k8s.io/kubernetes/pkg/volume" | ||||
| 	"k8s.io/kubernetes/pkg/volume/csi" | ||||
| 	"k8s.io/kubernetes/pkg/volume/util" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| @@ -50,9 +50,9 @@ func Test_NewAttachDetachController_Positive(t *testing.T) { | ||||
| 	informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) | ||||
|  | ||||
| 	// Act | ||||
| 	logger, _ := ktesting.NewTestContext(t) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	_, err := NewAttachDetachController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		fakeKubeClient, | ||||
| 		informerFactory.Core().V1().Pods(), | ||||
| 		informerFactory.Core().V1().Nodes(), | ||||
| @@ -81,11 +81,9 @@ func Test_AttachDetachControllerStateOfWorldPopulators_Positive(t *testing.T) { | ||||
| 	fakeKubeClient := controllervolumetesting.CreateTestClient() | ||||
| 	informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) | ||||
|  | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	logger, tCtx := ktesting.NewTestContext(t) | ||||
| 	adcObj, err := NewAttachDetachController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		fakeKubeClient, | ||||
| 		informerFactory.Core().V1().Pods(), | ||||
| 		informerFactory.Core().V1().Nodes(), | ||||
| @@ -109,8 +107,8 @@ func Test_AttachDetachControllerStateOfWorldPopulators_Positive(t *testing.T) { | ||||
| 	adc := adcObj.(*attachDetachController) | ||||
|  | ||||
| 	// Act | ||||
| 	informerFactory.Start(ctx.Done()) | ||||
| 	informerFactory.WaitForCacheSync(ctx.Done()) | ||||
| 	informerFactory.Start(tCtx.Done()) | ||||
| 	informerFactory.WaitForCacheSync(tCtx.Done()) | ||||
|  | ||||
| 	err = adc.populateActualStateOfWorld(logger) | ||||
| 	if err != nil { | ||||
| @@ -206,11 +204,9 @@ func BenchmarkPopulateActualStateOfWorld(b *testing.B) { | ||||
| 	} | ||||
| 	informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) | ||||
|  | ||||
| 	logger, ctx := ktesting.NewTestContext(b) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	logger, tCtx := ktesting.NewTestContext(b) | ||||
| 	adcObj, err := NewAttachDetachController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		fakeKubeClient, | ||||
| 		informerFactory.Core().V1().Pods(), | ||||
| 		informerFactory.Core().V1().Nodes(), | ||||
| @@ -234,8 +230,8 @@ func BenchmarkPopulateActualStateOfWorld(b *testing.B) { | ||||
| 	adc := adcObj.(*attachDetachController) | ||||
|  | ||||
| 	// Act | ||||
| 	informerFactory.Start(ctx.Done()) | ||||
| 	informerFactory.WaitForCacheSync(ctx.Done()) | ||||
| 	informerFactory.Start(tCtx.Done()) | ||||
| 	informerFactory.WaitForCacheSync(tCtx.Done()) | ||||
|  | ||||
| 	b.ResetTimer() | ||||
| 	err = adc.populateActualStateOfWorld(logger) | ||||
| @@ -267,11 +263,9 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
| 	var podsNum, extraPodsNum, nodesNum, i int | ||||
|  | ||||
| 	// Create the controller | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	logger, tCtx := ktesting.NewTestContext(t) | ||||
| 	adcObj, err := NewAttachDetachController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		fakeKubeClient, | ||||
| 		informerFactory.Core().V1().Pods(), | ||||
| 		informerFactory.Core().V1().Nodes(), | ||||
| @@ -295,7 +289,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
|  | ||||
| 	adc := adcObj.(*attachDetachController) | ||||
|  | ||||
| 	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) | ||||
| 	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(tCtx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err) | ||||
| 	} | ||||
| @@ -305,7 +299,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
| 		podInformer.GetIndexer().Add(&podToAdd) | ||||
| 		podsNum++ | ||||
| 	} | ||||
| 	nodes, err := fakeKubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) | ||||
| 	nodes, err := fakeKubeClient.CoreV1().Nodes().List(tCtx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err) | ||||
| 	} | ||||
| @@ -315,7 +309,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
| 		nodesNum++ | ||||
| 	} | ||||
|  | ||||
| 	csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(context.TODO(), metav1.ListOptions{}) | ||||
| 	csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(tCtx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err) | ||||
| 	} | ||||
| @@ -324,9 +318,9 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
| 		csiNodeInformer.GetIndexer().Add(&csiNodeToAdd) | ||||
| 	} | ||||
|  | ||||
| 	informerFactory.Start(ctx.Done()) | ||||
| 	informerFactory.Start(tCtx.Done()) | ||||
|  | ||||
| 	if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(), | ||||
| 	if !kcache.WaitForNamedCacheSync("attach detach", tCtx.Done(), | ||||
| 		informerFactory.Core().V1().Pods().Informer().HasSynced, | ||||
| 		informerFactory.Core().V1().Nodes().Informer().HasSynced, | ||||
| 		informerFactory.Storage().V1().CSINodes().Informer().HasSynced) { | ||||
| @@ -382,7 +376,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
|  | ||||
| 	for _, newPod := range extraPods1 { | ||||
| 		// Add a new pod between ASW and DSW populators | ||||
| 		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{}) | ||||
| 		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(tCtx, newPod, metav1.CreateOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) | ||||
| 		} | ||||
| @@ -399,7 +393,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
|  | ||||
| 	for _, newPod := range extraPods2 { | ||||
| 		// Add a new pod between DSW populator and reconciler run | ||||
| 		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{}) | ||||
| 		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(tCtx, newPod, metav1.CreateOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) | ||||
| 		} | ||||
| @@ -407,8 +401,8 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 | ||||
| 		podInformer.GetIndexer().Add(newPod) | ||||
| 	} | ||||
|  | ||||
| 	go adc.reconciler.Run(ctx) | ||||
| 	go adc.desiredStateOfWorldPopulator.Run(ctx) | ||||
| 	go adc.reconciler.Run(tCtx) | ||||
| 	go adc.desiredStateOfWorldPopulator.Run(tCtx) | ||||
|  | ||||
| 	time.Sleep(time.Second * 1) // Wait so the reconciler calls sync at least once | ||||
|  | ||||
| @@ -533,11 +527,9 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 	vaInformer := informerFactory.Storage().V1().VolumeAttachments().Informer() | ||||
|  | ||||
| 	// Create the controller | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	logger, tCtx := ktesting.NewTestContext(t) | ||||
| 	adcObj, err := NewAttachDetachController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		fakeKubeClient, | ||||
| 		informerFactory.Core().V1().Pods(), | ||||
| 		informerFactory.Core().V1().Nodes(), | ||||
| @@ -560,7 +552,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 	adc := adcObj.(*attachDetachController) | ||||
|  | ||||
| 	// Add existing objects (created by testplugin) to the respective informers | ||||
| 	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) | ||||
| 	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(tCtx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err) | ||||
| 	} | ||||
| @@ -568,7 +560,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 		podToAdd := pod | ||||
| 		podInformer.GetIndexer().Add(&podToAdd) | ||||
| 	} | ||||
| 	nodes, err := fakeKubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) | ||||
| 	nodes, err := fakeKubeClient.CoreV1().Nodes().List(tCtx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err) | ||||
| 	} | ||||
| @@ -597,7 +589,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
| 		_, err = adc.kubeClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}) | ||||
| 		_, err = adc.kubeClient.CoreV1().Nodes().Update(tCtx, newNode, metav1.UpdateOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) | ||||
| 		} | ||||
| @@ -606,7 +598,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 	// Create and add objects requested by the test | ||||
| 	if tc.podName != "" { | ||||
| 		newPod := controllervolumetesting.NewPodWithVolume(tc.podName, tc.volName, tc.podNodeName) | ||||
| 		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{}) | ||||
| 		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(tCtx, newPod, metav1.CreateOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err) | ||||
| 		} | ||||
| @@ -621,7 +613,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 			// Otherwise use NFS, which is not subject to migration. | ||||
| 			newPv = controllervolumetesting.NewNFSPV(tc.pvName, tc.volName) | ||||
| 		} | ||||
| 		_, err = adc.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), newPv, metav1.CreateOptions{}) | ||||
| 		_, err = adc.kubeClient.CoreV1().PersistentVolumes().Create(tCtx, newPv, metav1.CreateOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Run failed with error. Failed to create a new pv: <%v>", err) | ||||
| 		} | ||||
| @@ -629,7 +621,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 	} | ||||
| 	if tc.vaName != "" { | ||||
| 		newVa := controllervolumetesting.NewVolumeAttachment(tc.vaName, tc.pvName, tc.vaNodeName, tc.vaAttachStatus) | ||||
| 		_, err = adc.kubeClient.StorageV1().VolumeAttachments().Create(context.TODO(), newVa, metav1.CreateOptions{}) | ||||
| 		_, err = adc.kubeClient.StorageV1().VolumeAttachments().Create(tCtx, newVa, metav1.CreateOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Run failed with error. Failed to create a new volumeAttachment: <%v>", err) | ||||
| 		} | ||||
| @@ -637,9 +629,9 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 	} | ||||
|  | ||||
| 	// Make sure the informer cache is synced | ||||
| 	informerFactory.Start(ctx.Done()) | ||||
| 	informerFactory.Start(tCtx.Done()) | ||||
|  | ||||
| 	if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(), | ||||
| 	if !kcache.WaitForNamedCacheSync("attach detach", tCtx.Done(), | ||||
| 		informerFactory.Core().V1().Pods().Informer().HasSynced, | ||||
| 		informerFactory.Core().V1().Nodes().Informer().HasSynced, | ||||
| 		informerFactory.Core().V1().PersistentVolumes().Informer().HasSynced, | ||||
| @@ -659,8 +651,8 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { | ||||
| 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err) | ||||
| 	} | ||||
| 	// Run reconciler and DSW populator loops | ||||
| 	go adc.reconciler.Run(ctx) | ||||
| 	go adc.desiredStateOfWorldPopulator.Run(ctx) | ||||
| 	go adc.reconciler.Run(tCtx) | ||||
| 	go adc.desiredStateOfWorldPopulator.Run(tCtx) | ||||
| 	if tc.csiMigration { | ||||
| 		verifyExpectedVolumeState(t, adc, tc) | ||||
| 	} else { | ||||
|   | ||||
| @@ -76,6 +76,7 @@ type ephemeralController struct { | ||||
|  | ||||
| // NewController creates an ephemeral volume controller. | ||||
| func NewController( | ||||
| 	ctx context.Context, | ||||
| 	kubeClient clientset.Interface, | ||||
| 	podInformer coreinformers.PodInformer, | ||||
| 	pvcInformer coreinformers.PersistentVolumeClaimInformer) (Controller, error) { | ||||
| @@ -92,7 +93,7 @@ func NewController( | ||||
|  | ||||
| 	ephemeralvolumemetrics.RegisterMetrics() | ||||
|  | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	eventBroadcaster.StartLogging(klog.Infof) | ||||
| 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) | ||||
| 	ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ephemeral_volume"}) | ||||
|   | ||||
| @@ -22,6 +22,7 @@ import ( | ||||
| 	"sort" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/stretchr/testify/assert" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	apierrors "k8s.io/apimachinery/pkg/api/errors" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| @@ -35,8 +36,6 @@ import ( | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/kubernetes/pkg/controller" | ||||
| 	ephemeralvolumemetrics "k8s.io/kubernetes/pkg/controller/volume/ephemeral/metrics" | ||||
|  | ||||
| 	"github.com/stretchr/testify/assert" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| @@ -146,7 +145,7 @@ func TestSyncHandler(t *testing.T) { | ||||
| 			podInformer := informerFactory.Core().V1().Pods() | ||||
| 			pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims() | ||||
|  | ||||
| 			c, err := NewController(fakeKubeClient, podInformer, pvcInformer) | ||||
| 			c, err := NewController(ctx, fakeKubeClient, podInformer, pvcInformer) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("error creating ephemeral controller : %v", err) | ||||
| 			} | ||||
|   | ||||
| @@ -100,6 +100,7 @@ type expandController struct { | ||||
|  | ||||
| // NewExpandController expands the pvs | ||||
| func NewExpandController( | ||||
| 	ctx context.Context, | ||||
| 	kubeClient clientset.Interface, | ||||
| 	pvcInformer coreinformers.PersistentVolumeClaimInformer, | ||||
| 	cloud cloudprovider.Interface, | ||||
| @@ -121,8 +122,8 @@ func NewExpandController( | ||||
| 		return nil, fmt.Errorf("could not initialize volume plugins for Expand Controller : %+v", err) | ||||
| 	} | ||||
|  | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster.StartStructuredLogging(0) | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	eventBroadcaster.StartStructuredLogging(3) | ||||
| 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) | ||||
| 	expc.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"}) | ||||
| 	blkutil := volumepathhandler.NewBlockVolumePathHandler() | ||||
|   | ||||
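The expand-controller hunk above shows the two changes this PR repeats across controllers: the constructor receives a ctx and hands it to the event broadcaster via record.WithContext, and structured logging of events moves from verbosity 0 to 3 as part of the verbosity readjustment. A minimal sketch of that constructor shape, using the client-go record API exactly as it appears in these hunks; the "demo-controller" component name is invented for illustration:

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newDemoRecorder ties the event broadcaster to the caller's context, so canceling
// ctx also stops event processing, and logs events only at verbosity 3 and above.
func newDemoRecorder(ctx context.Context, kubeClient clientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
	eventBroadcaster.StartStructuredLogging(3)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
	return eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "demo-controller"})
}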
| @@ -42,6 +42,7 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/volume/util" | ||||
| 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor" | ||||
| 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| func TestSyncHandler(t *testing.T) { | ||||
| @@ -91,6 +92,7 @@ func TestSyncHandler(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	for _, tc := range tests { | ||||
| 		tCtx := ktesting.Init(t) | ||||
| 		test := tc | ||||
| 		fakeKubeClient := controllervolumetesting.CreateTestClient() | ||||
| 		informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) | ||||
| @@ -106,7 +108,7 @@ func TestSyncHandler(t *testing.T) { | ||||
| 		} | ||||
| 		allPlugins := []volume.VolumePlugin{} | ||||
| 		translator := csitrans.New() | ||||
| 		expc, err := NewExpandController(fakeKubeClient, pvcInformer, nil, allPlugins, translator, csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate)) | ||||
| 		expc, err := NewExpandController(tCtx, fakeKubeClient, pvcInformer, nil, allPlugins, translator, csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate)) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("error creating expand controller : %v", err) | ||||
| 		} | ||||
|   | ||||
| @@ -77,7 +77,7 @@ type ControllerParameters struct { | ||||
|  | ||||
| // NewController creates a new PersistentVolume controller | ||||
| func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolumeController, error) { | ||||
| 	eventBroadcaster := record.NewBroadcaster() | ||||
| 	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) | ||||
| 	eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"}) | ||||
|  | ||||
| 	controller := &PersistentVolumeController{ | ||||
| @@ -305,7 +305,7 @@ func (ctrl *PersistentVolumeController) Run(ctx context.Context) { | ||||
| 	defer ctrl.volumeQueue.ShutDown() | ||||
|  | ||||
| 	// Start events processing pipeline. | ||||
| 	ctrl.eventBroadcaster.StartStructuredLogging(0) | ||||
| 	ctrl.eventBroadcaster.StartStructuredLogging(3) | ||||
| 	ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")}) | ||||
| 	defer ctrl.eventBroadcaster.Shutdown() | ||||
|  | ||||
|   | ||||
| @@ -30,11 +30,11 @@ import ( | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	clientbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" | ||||
| 	restclient "k8s.io/client-go/rest" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	"k8s.io/kubernetes/pkg/controller/cronjob" | ||||
| 	"k8s.io/kubernetes/pkg/controller/job" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| func setup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFunc, *cronjob.ControllerV2, *job.Controller, informers.SharedInformerFactory, clientset.Interface) { | ||||
| @@ -148,16 +148,13 @@ func validateJobAndPod(t *testing.T, clientSet clientset.Interface, namespace st | ||||
| } | ||||
|  | ||||
| func TestCronJobLaunchesPodAndCleansUp(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	tCtx := ktesting.Init(t) | ||||
|  | ||||
| 	closeFn, cjc, jc, informerSet, clientSet := setup(ctx, t) | ||||
| 	closeFn, cjc, jc, informerSet, clientSet := setup(tCtx, t) | ||||
| 	defer closeFn() | ||||
|  | ||||
| 	// When shutting down, cancel must be called before closeFn. | ||||
| 	// We simply call it multiple times. | ||||
| 	defer cancel() | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
|  | ||||
| 	cronJobName := "foo" | ||||
| 	namespaceName := "simple-cronjob-test" | ||||
| @@ -167,11 +164,11 @@ func TestCronJobLaunchesPodAndCleansUp(t *testing.T) { | ||||
|  | ||||
| 	cjClient := clientSet.BatchV1().CronJobs(ns.Name) | ||||
|  | ||||
| 	informerSet.Start(ctx.Done()) | ||||
| 	go cjc.Run(ctx, 1) | ||||
| 	go jc.Run(ctx, 1) | ||||
| 	informerSet.Start(tCtx.Done()) | ||||
| 	go cjc.Run(tCtx, 1) | ||||
| 	go jc.Run(tCtx, 1) | ||||
|  | ||||
| 	_, err := cjClient.Create(context.TODO(), newCronJob(cronJobName, ns.Name, "* * * * ?"), metav1.CreateOptions{}) | ||||
| 	_, err := cjClient.Create(tCtx, newCronJob(cronJobName, ns.Name, "* * * * ?"), metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create CronJob: %v", err) | ||||
| 	} | ||||
|   | ||||
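The integration-test hunks above replace the ktesting.NewTestContext plus context.WithCancel boilerplate with a single ktesting.Init call. A sketch of the resulting test shape, assuming the test/utils/ktesting package used throughout these hunks; runSomething stands in for the controller Run calls and client requests a real test would issue:

import (
	"context"
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestSomething(t *testing.T) {
	// tCtx carries a per-test logger and satisfies context.Context, so it replaces
	// context.TODO()/context.WithCancel in client calls and controller Run methods.
	tCtx := ktesting.Init(t)

	// When teardown order matters, cancel explicitly with a reason before other
	// deferred cleanup (for example an API server TearDownFn) runs.
	defer tCtx.Cancel("test has completed")

	runSomething(tCtx)
}

// runSomething stands in for go controller.Run(tCtx, workers) or client.Create(tCtx, ...).
func runSomething(ctx context.Context) {}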
| @@ -30,7 +30,6 @@ import ( | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	restclient "k8s.io/client-go/rest" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	podutil "k8s.io/kubernetes/pkg/api/v1/pod" | ||||
| 	"k8s.io/kubernetes/pkg/controller/deployment" | ||||
| @@ -105,7 +104,6 @@ func newDeployment(name, ns string, replicas int32) *apps.Deployment { | ||||
| func dcSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) { | ||||
| 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd()) | ||||
| 	logger, _ := ktesting.NewTestContext(t) | ||||
|  | ||||
| 	config := restclient.CopyConfig(server.ClientConfig) | ||||
| 	clientSet, err := clientset.NewForConfig(config) | ||||
| @@ -126,7 +124,7 @@ func dcSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFu | ||||
| 		t.Fatalf("error creating Deployment controller: %v", err) | ||||
| 	} | ||||
| 	rm := replicaset.NewReplicaSetController( | ||||
| 		logger, | ||||
| 		ctx, | ||||
| 		informers.Apps().V1().ReplicaSets(), | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replicaset-controller")), | ||||
|   | ||||
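Several constructors touched by this change (replicaset, replication, endpoints, ...) switch from accepting a pre-extracted logger to accepting the context itself. A rough sketch of the receiving side, assuming only the klog contextual-logging helpers already imported elsewhere in this diff; NewDemoController is an invented name:

import (
	"context"

	"k8s.io/klog/v2"
)

// NewDemoController takes the caller's ctx instead of a logr.Logger; the logger is
// derived inside, so it inherits any names and values the caller attached to the context.
func NewDemoController(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting demo controller")
}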
| @@ -539,14 +539,15 @@ func createPDBUsingRemovedAPI(ctx context.Context, etcdClient *clientv3.Client, | ||||
| } | ||||
|  | ||||
| func TestPatchCompatibility(t *testing.T) { | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
|  | ||||
| 	s, pdbc, _, clientSet, _, _ := setup(ctx, t) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	s, pdbc, _, clientSet, _, _ := setup(tCtx, t) | ||||
| 	defer s.TearDownFn() | ||||
| 	// Even though pdbc isn't used in this test, its creation is already | ||||
| 	// spawning some goroutines. So we need to run it to ensure they won't leak. | ||||
| 	cancel() | ||||
| 	pdbc.Run(ctx) | ||||
| 	// We can't cancel immediately; it has to happen later, because canceling the | ||||
| 	// context shuts down the event broadcaster. | ||||
| 	defer tCtx.Cancel("cleaning up") | ||||
| 	go pdbc.Run(tCtx) | ||||
|  | ||||
| 	testcases := []struct { | ||||
| 		name             string | ||||
|   | ||||
| @@ -17,7 +17,6 @@ limitations under the License. | ||||
| package dualstack | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| @@ -44,11 +43,9 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 		return map[string]string{"foo": "bar"} | ||||
| 	} | ||||
|  | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	tCtx := ktesting.Init(t) | ||||
|  | ||||
| 	client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ | ||||
| 	client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{ | ||||
| 		ModifyServerRunOptions: func(opts *options.ServerRunOptions) { | ||||
| 			opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR) | ||||
| 			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| @@ -59,7 +56,7 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
|  | ||||
| 	// Wait until the default "kubernetes" service is created. | ||||
| 	if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { | ||||
| 		_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{}) | ||||
| 		_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{}) | ||||
| 		if err != nil && !apierrors.IsNotFound(err) { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -88,11 +85,12 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	if _, err := client.CoreV1().Nodes().Create(ctx, testNode, metav1.CreateOptions{}); err != nil { | ||||
| 	if _, err := client.CoreV1().Nodes().Create(tCtx, testNode, metav1.CreateOptions{}); err != nil { | ||||
| 		t.Fatalf("Failed to create Node %q: %v", testNode.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	epController := endpoint.NewEndpointController( | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Services(), | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| @@ -100,7 +98,7 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 		1*time.Second) | ||||
|  | ||||
| 	epsController := endpointslice.NewController( | ||||
| 		ctx, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Services(), | ||||
| 		informers.Core().V1().Nodes(), | ||||
| @@ -110,10 +108,10 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 		1*time.Second) | ||||
|  | ||||
| 	// Start informer and controllers | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	// use only one worker to serialize the updates | ||||
| 	go epController.Run(ctx, 1) | ||||
| 	go epsController.Run(ctx, 1) | ||||
| 	go epController.Run(tCtx, 1) | ||||
| 	go epsController.Run(tCtx, 1) | ||||
|  | ||||
| 	var testcases = []struct { | ||||
| 		name           string | ||||
| @@ -170,7 +168,7 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 				}, | ||||
| 			} | ||||
|  | ||||
| 			createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}) | ||||
| 			createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create pod %s: %v", pod.Name, err) | ||||
| 			} | ||||
| @@ -181,7 +179,7 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 				Phase:  v1.PodRunning, | ||||
| 				PodIPs: []v1.PodIP{{IP: podIPbyFamily[v1.IPv4Protocol]}, {IP: podIPbyFamily[v1.IPv6Protocol]}}, | ||||
| 			} | ||||
| 			_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{}) | ||||
| 			_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) | ||||
| 			} | ||||
| @@ -209,7 +207,7 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 			} | ||||
|  | ||||
| 			// create a service | ||||
| 			_, err = client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}) | ||||
| 			_, err = client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Error creating service: %v", err) | ||||
| 			} | ||||
| @@ -218,7 +216,7 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 			// legacy endpoints are not dual stack | ||||
| 			// and use the address of the first IP family | ||||
| 			if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { | ||||
| 				e, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) | ||||
| 				e, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{}) | ||||
| 				if err != nil { | ||||
| 					t.Logf("Error fetching endpoints: %v", err) | ||||
| 					return false, nil | ||||
| @@ -240,7 +238,7 @@ func TestDualStackEndpoints(t *testing.T) { | ||||
| 			// wait until the endpoint slices are created | ||||
| 			err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { | ||||
| 				lSelector := discovery.LabelServiceName + "=" + svc.Name | ||||
| 				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector}) | ||||
| 				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(tCtx, metav1.ListOptions{LabelSelector: lSelector}) | ||||
| 				if err != nil { | ||||
| 					t.Logf("Error listing EndpointSlices: %v", err) | ||||
| 					return false, nil | ||||
|   | ||||
| @@ -17,7 +17,6 @@ limitations under the License. | ||||
| package endpoints | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"testing" | ||||
| @@ -33,6 +32,7 @@ import ( | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	"k8s.io/kubernetes/pkg/controller/endpoint" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| func TestEndpointUpdates(t *testing.T) { | ||||
| @@ -47,7 +47,9 @@ func TestEndpointUpdates(t *testing.T) { | ||||
|  | ||||
| 	informers := informers.NewSharedInformerFactory(client, 0) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	epController := endpoint.NewEndpointController( | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Services(), | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| @@ -55,10 +57,8 @@ func TestEndpointUpdates(t *testing.T) { | ||||
| 		0) | ||||
|  | ||||
| 	// Start informer and controllers | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go epController.Run(ctx, 1) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go epController.Run(tCtx, 1) | ||||
|  | ||||
| 	// Create namespace | ||||
| 	ns := framework.CreateNamespaceOrDie(client, "test-endpoints-updates", t) | ||||
| @@ -82,7 +82,7 @@ func TestEndpointUpdates(t *testing.T) { | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}) | ||||
| 	createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create pod %s: %v", pod.Name, err) | ||||
| 	} | ||||
| @@ -92,14 +92,14 @@ func TestEndpointUpdates(t *testing.T) { | ||||
| 		Phase:  v1.PodRunning, | ||||
| 		PodIPs: []v1.PodIP{{IP: "1.1.1.1"}, {IP: "2001:db8::"}}, | ||||
| 	} | ||||
| 	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{}) | ||||
| 	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	// Create a service associated to the pod | ||||
| 	svc := newService(ns.Name, "foo1") | ||||
| 	svc1, err := client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}) | ||||
| 	svc1, err := client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create service %s: %v", svc.Name, err) | ||||
| 	} | ||||
| @@ -107,7 +107,7 @@ func TestEndpointUpdates(t *testing.T) { | ||||
| 	// Obtain ResourceVersion of the new endpoint created | ||||
| 	var resVersion string | ||||
| 	if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Logf("error fetching endpoints: %v", err) | ||||
| 			return false, nil | ||||
| @@ -120,7 +120,7 @@ func TestEndpointUpdates(t *testing.T) { | ||||
|  | ||||
| 	// Force recomputation on the endpoint controller | ||||
| 	svc1.SetAnnotations(map[string]string{"foo": "bar"}) | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Update(ctx, svc1, metav1.UpdateOptions{}) | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Update(tCtx, svc1, metav1.UpdateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to update service %s: %v", svc1.Name, err) | ||||
| 	} | ||||
| @@ -130,13 +130,13 @@ func TestEndpointUpdates(t *testing.T) { | ||||
| 	// was recomputed before asserting, since we only have 1 worker | ||||
| 	// in the endpoint controller | ||||
| 	svc2 := newService(ns.Name, "foo2") | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Create(ctx, svc2, metav1.CreateOptions{}) | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Create(tCtx, svc2, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create service %s: %v", svc.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { | ||||
| 		_, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc2.Name, metav1.GetOptions{}) | ||||
| 		_, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc2.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Logf("error fetching endpoints: %v", err) | ||||
| 			return false, nil | ||||
| @@ -148,7 +148,7 @@ func TestEndpointUpdates(t *testing.T) { | ||||
|  | ||||
| 	// the endpoint controller should not update the endpoint created for the original | ||||
| 	// service since nothing has changed; the resource version has to stay the same | ||||
| 	endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) | ||||
| 	endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("error fetching endpoints: %v", err) | ||||
| 	} | ||||
| @@ -173,7 +173,9 @@ func TestExternalNameToClusterIPTransition(t *testing.T) { | ||||
|  | ||||
| 	informers := informers.NewSharedInformerFactory(client, 0) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	epController := endpoint.NewEndpointController( | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Services(), | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| @@ -181,10 +183,8 @@ func TestExternalNameToClusterIPTransition(t *testing.T) { | ||||
| 		0) | ||||
|  | ||||
| 	// Start informer and controllers | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go epController.Run(ctx, 1) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go epController.Run(tCtx, 1) | ||||
|  | ||||
| 	// Create namespace | ||||
| 	ns := framework.CreateNamespaceOrDie(client, "test-endpoints-updates", t) | ||||
| @@ -208,7 +208,7 @@ func TestExternalNameToClusterIPTransition(t *testing.T) { | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}) | ||||
| 	createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create pod %s: %v", pod.Name, err) | ||||
| 	} | ||||
| @@ -218,20 +218,20 @@ func TestExternalNameToClusterIPTransition(t *testing.T) { | ||||
| 		Phase:  v1.PodRunning, | ||||
| 		PodIPs: []v1.PodIP{{IP: "1.1.1.1"}, {IP: "2001:db8::"}}, | ||||
| 	} | ||||
| 	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{}) | ||||
| 	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	// Create an ExternalName service associated to the pod | ||||
| 	svc := newExternalNameService(ns.Name, "foo1") | ||||
| 	svc1, err := client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}) | ||||
| 	svc1, err := client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create service %s: %v", svc.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{}) | ||||
| 		if err == nil { | ||||
| 			t.Errorf("expected no endpoints for externalName service, got: %v", endpoints) | ||||
| 			return true, nil | ||||
| @@ -244,13 +244,13 @@ func TestExternalNameToClusterIPTransition(t *testing.T) { | ||||
|  | ||||
| 	// update service to ClusterIP type and verify endpoint was created | ||||
| 	svc1.Spec.Type = v1.ServiceTypeClusterIP | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Update(ctx, svc1, metav1.UpdateOptions{}) | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Update(tCtx, svc1, metav1.UpdateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to update service %s: %v", svc1.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { | ||||
| 		ep, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc1.Name, metav1.GetOptions{}) | ||||
| 		ep, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc1.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Logf("no endpoints found, error: %v", err) | ||||
| 			return false, nil | ||||
| @@ -282,7 +282,9 @@ func TestEndpointWithTerminatingPod(t *testing.T) { | ||||
|  | ||||
| 	informers := informers.NewSharedInformerFactory(client, 0) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	epController := endpoint.NewEndpointController( | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Services(), | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| @@ -290,10 +292,8 @@ func TestEndpointWithTerminatingPod(t *testing.T) { | ||||
| 		0) | ||||
|  | ||||
| 	// Start informer and controllers | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go epController.Run(ctx, 1) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go epController.Run(tCtx, 1) | ||||
|  | ||||
| 	// Create namespace | ||||
| 	ns := framework.CreateNamespaceOrDie(client, "test-endpoints-terminating", t) | ||||
| @@ -337,13 +337,13 @@ func TestEndpointWithTerminatingPod(t *testing.T) { | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}) | ||||
| 	createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create pod %s: %v", pod.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	createdPod.Status = pod.Status | ||||
| 	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{}) | ||||
| 	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err) | ||||
| 	} | ||||
| @@ -366,14 +366,14 @@ func TestEndpointWithTerminatingPod(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}) | ||||
| 	_, err = client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create service %s: %v", svc.Name, err) | ||||
| 	} | ||||
|  | ||||
| 	// poll until the Endpoints associated with the previously created Service exist | ||||
| 	if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, nil | ||||
| 		} | ||||
| @@ -392,7 +392,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) { | ||||
| 		t.Fatalf("endpoints not found: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	err = client.CoreV1().Pods(ns.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{}) | ||||
| 	err = client.CoreV1().Pods(ns.Name).Delete(tCtx, pod.Name, metav1.DeleteOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("error deleting test pod: %v", err) | ||||
| 	} | ||||
| @@ -401,7 +401,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) { | ||||
| 	if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { | ||||
| 		// Ensure that the recently deleted Pod exists but with a deletion timestamp. If the Pod does not exist, | ||||
| 		// we should fail the test since it is no longer validating against a terminating pod. | ||||
| 		pod, err := client.CoreV1().Pods(ns.Name).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 		pod, err := client.CoreV1().Pods(ns.Name).Get(tCtx, pod.Name, metav1.GetOptions{}) | ||||
| 		if apierrors.IsNotFound(err) { | ||||
| 			return false, fmt.Errorf("expected Pod %q to exist with deletion timestamp but was not found: %v", pod.Name, err) | ||||
| 		} | ||||
| @@ -413,7 +413,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) { | ||||
| 			return false, errors.New("pod did not have deletion timestamp set") | ||||
| 		} | ||||
|  | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{}) | ||||
| 		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, nil | ||||
| 		} | ||||
|   | ||||
| @@ -30,12 +30,12 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	"k8s.io/kubernetes/pkg/controller/endpoint" | ||||
| 	"k8s.io/kubernetes/pkg/controller/endpointslice" | ||||
| 	"k8s.io/kubernetes/pkg/controller/endpointslicemirroring" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| func TestEndpointSliceMirroring(t *testing.T) { | ||||
| @@ -48,11 +48,12 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
| 		t.Fatalf("Error creating clientset: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informers := informers.NewSharedInformerFactory(client, resyncPeriod) | ||||
|  | ||||
| 	epController := endpoint.NewEndpointController( | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Services(), | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| @@ -60,7 +61,7 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
| 		1*time.Second) | ||||
|  | ||||
| 	epsController := endpointslice.NewController( | ||||
| 		ctx, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Services(), | ||||
| 		informers.Core().V1().Nodes(), | ||||
| @@ -70,7 +71,7 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
| 		1*time.Second) | ||||
|  | ||||
| 	epsmController := endpointslicemirroring.NewController( | ||||
| 		ctx, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| 		informers.Discovery().V1().EndpointSlices(), | ||||
| 		informers.Core().V1().Services(), | ||||
| @@ -79,11 +80,10 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
| 		1*time.Second) | ||||
|  | ||||
| 	// Start informer and controllers | ||||
| 	defer cancel() | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go epController.Run(ctx, 5) | ||||
| 	go epsController.Run(ctx, 5) | ||||
| 	go epsmController.Run(ctx, 5) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go epController.Run(tCtx, 5) | ||||
| 	go epsController.Run(tCtx, 5) | ||||
| 	go epsmController.Run(tCtx, 5) | ||||
|  | ||||
| 	testCases := []struct { | ||||
| 		testName                     string | ||||
| @@ -259,7 +259,7 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
| 			if tc.service != nil { | ||||
| 				resourceName = tc.service.Name | ||||
| 				tc.service.Namespace = ns.Name | ||||
| 				_, err = client.CoreV1().Services(ns.Name).Create(ctx, tc.service, metav1.CreateOptions{}) | ||||
| 				_, err = client.CoreV1().Services(ns.Name).Create(tCtx, tc.service, metav1.CreateOptions{}) | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Error creating service: %v", err) | ||||
| 				} | ||||
| @@ -268,7 +268,7 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
| 			if tc.customEndpoints != nil { | ||||
| 				resourceName = tc.customEndpoints.Name | ||||
| 				tc.customEndpoints.Namespace = ns.Name | ||||
| 				_, err = client.CoreV1().Endpoints(ns.Name).Create(ctx, tc.customEndpoints, metav1.CreateOptions{}) | ||||
| 				_, err = client.CoreV1().Endpoints(ns.Name).Create(tCtx, tc.customEndpoints, metav1.CreateOptions{}) | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Error creating endpoints: %v", err) | ||||
| 				} | ||||
| @@ -276,7 +276,7 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
|  | ||||
| 			err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { | ||||
| 				lSelector := discovery.LabelServiceName + "=" + resourceName | ||||
| 				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector}) | ||||
| 				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(tCtx, metav1.ListOptions{LabelSelector: lSelector}) | ||||
| 				if err != nil { | ||||
| 					t.Logf("Error listing EndpointSlices: %v", err) | ||||
| 					return false, err | ||||
| @@ -312,7 +312,6 @@ func TestEndpointSliceMirroring(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestEndpointSliceMirroringUpdates(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd()) | ||||
| 	defer server.TearDownFn() | ||||
| @@ -325,8 +324,9 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) { | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informers := informers.NewSharedInformerFactory(client, resyncPeriod) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	epsmController := endpointslicemirroring.NewController( | ||||
| 		ctx, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| 		informers.Discovery().V1().EndpointSlices(), | ||||
| 		informers.Core().V1().Services(), | ||||
| @@ -335,10 +335,8 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) { | ||||
| 		1*time.Second) | ||||
|  | ||||
| 	// Start informer and controllers | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go epsmController.Run(ctx, 1) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go epsmController.Run(tCtx, 1) | ||||
|  | ||||
| 	testCases := []struct { | ||||
| 		testName      string | ||||
| @@ -405,19 +403,19 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) { | ||||
| 				}}, | ||||
| 			} | ||||
|  | ||||
| 			_, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{}) | ||||
| 			_, err = client.CoreV1().Services(ns.Name).Create(tCtx, service, metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Error creating service: %v", err) | ||||
| 			} | ||||
|  | ||||
| 			_, err = client.CoreV1().Endpoints(ns.Name).Create(ctx, customEndpoints, metav1.CreateOptions{}) | ||||
| 			_, err = client.CoreV1().Endpoints(ns.Name).Create(tCtx, customEndpoints, metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Error creating endpoints: %v", err) | ||||
| 			} | ||||
|  | ||||
| 			// update endpoint | ||||
| 			tc.tweakEndpoint(customEndpoints) | ||||
| 			_, err = client.CoreV1().Endpoints(ns.Name).Update(ctx, customEndpoints, metav1.UpdateOptions{}) | ||||
| 			_, err = client.CoreV1().Endpoints(ns.Name).Update(tCtx, customEndpoints, metav1.UpdateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Error updating endpoints: %v", err) | ||||
| 			} | ||||
| @@ -425,7 +423,7 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) { | ||||
| 			// verify the endpoint updates were mirrored | ||||
| 			err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) { | ||||
| 				lSelector := discovery.LabelServiceName + "=" + service.Name | ||||
| 				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector}) | ||||
| 				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(tCtx, metav1.ListOptions{LabelSelector: lSelector}) | ||||
| 				if err != nil { | ||||
| 					t.Logf("Error listing EndpointSlices: %v", err) | ||||
| 					return false, err | ||||
| @@ -489,7 +487,6 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestEndpointSliceMirroringSelectorTransition(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd()) | ||||
| 	defer server.TearDownFn() | ||||
| @@ -502,8 +499,9 @@ func TestEndpointSliceMirroringSelectorTransition(t *testing.T) { | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informers := informers.NewSharedInformerFactory(client, resyncPeriod) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	epsmController := endpointslicemirroring.NewController( | ||||
| 		ctx, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Endpoints(), | ||||
| 		informers.Discovery().V1().EndpointSlices(), | ||||
| 		informers.Core().V1().Services(), | ||||
| @@ -512,10 +510,8 @@ func TestEndpointSliceMirroringSelectorTransition(t *testing.T) { | ||||
| 		1*time.Second) | ||||
|  | ||||
| 	// Start informer and controllers | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go epsmController.Run(ctx, 1) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go epsmController.Run(tCtx, 1) | ||||
|  | ||||
| 	testCases := []struct { | ||||
| 		testName               string | ||||
|   | ||||
| @@ -29,10 +29,10 @@ import ( | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	"k8s.io/kubernetes/pkg/controller/endpointslice" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	utilpointer "k8s.io/utils/pointer" | ||||
| ) | ||||
|  | ||||
| @@ -116,9 +116,9 @@ func TestEndpointSliceTerminating(t *testing.T) { | ||||
| 			resyncPeriod := 12 * time.Hour | ||||
| 			informers := informers.NewSharedInformerFactory(client, resyncPeriod) | ||||
|  | ||||
| 			_, ctx := ktesting.NewTestContext(t) | ||||
| 			tCtx := ktesting.Init(t) | ||||
| 			epsController := endpointslice.NewController( | ||||
| 				ctx, | ||||
| 				tCtx, | ||||
| 				informers.Core().V1().Pods(), | ||||
| 				informers.Core().V1().Services(), | ||||
| 				informers.Core().V1().Nodes(), | ||||
| @@ -128,10 +128,8 @@ func TestEndpointSliceTerminating(t *testing.T) { | ||||
| 				1*time.Second) | ||||
|  | ||||
| 			// Start informer and controllers | ||||
| 			ctx, cancel := context.WithCancel(ctx) | ||||
| 			defer cancel() | ||||
| 			informers.Start(ctx.Done()) | ||||
| 			go epsController.Run(ctx, 1) | ||||
| 			informers.Start(tCtx.Done()) | ||||
| 			go epsController.Run(tCtx, 1) | ||||
|  | ||||
| 			// Create namespace | ||||
| 			ns := framework.CreateNamespaceOrDie(client, "test-endpoints-terminating", t) | ||||
|   | ||||
| @@ -47,11 +47,11 @@ import ( | ||||
| 	"k8s.io/client-go/tools/cache" | ||||
| 	"k8s.io/controller-manager/pkg/informerfactory" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	"k8s.io/kubernetes/pkg/controller/garbagecollector" | ||||
| 	"k8s.io/kubernetes/test/integration" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	"k8s.io/utils/ptr" | ||||
| ) | ||||
|  | ||||
| @@ -247,9 +247,13 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work | ||||
| 	} | ||||
| 	sharedInformers := informers.NewSharedInformerFactory(clientSet, 0) | ||||
| 	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0) | ||||
|  | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	alwaysStarted := make(chan struct{}) | ||||
| 	close(alwaysStarted) | ||||
| 	gc, err := garbagecollector.NewGarbageCollector( | ||||
| 		ctx, | ||||
| 		clientSet, | ||||
| 		metadataClient, | ||||
| 		restMapper, | ||||
| @@ -261,8 +265,6 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work | ||||
| 		t.Fatalf("failed to create garbage collector: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	tearDown := func() { | ||||
| 		cancel() | ||||
| 		result.TearDownFn() | ||||
|   | ||||
| @@ -37,13 +37,13 @@ import ( | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	watchtools "k8s.io/client-go/tools/watch" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	"k8s.io/kubernetes/cmd/kube-apiserver/app/options" | ||||
| 	"k8s.io/kubernetes/pkg/controller" | ||||
| 	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" | ||||
| 	resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota" | ||||
| 	quotainstall "k8s.io/kubernetes/pkg/quota/v1/install" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| @@ -60,7 +60,7 @@ const ( | ||||
| //	quota_test.go:100: Took 4.196205966s to scale up without quota | ||||
| //	quota_test.go:115: Took 12.021640372s to scale up with quota | ||||
| func TestQuota(t *testing.T) { | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| @@ -82,7 +82,7 @@ func TestQuota(t *testing.T) { | ||||
|  | ||||
| 	informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) | ||||
| 	rm := replicationcontroller.NewReplicationManager( | ||||
| 		logger, | ||||
| 		ctx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().ReplicationControllers(), | ||||
| 		clientset, | ||||
| @@ -291,12 +291,10 @@ plugins: | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	tCtx := ktesting.Init(t) | ||||
|  | ||||
| 	// Set up an API server | ||||
| 	_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ | ||||
| 	_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{ | ||||
| 		ModifyServerRunOptions: func(opts *options.ServerRunOptions) { | ||||
| 			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 			opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} | ||||
| @@ -313,13 +311,13 @@ plugins: | ||||
|  | ||||
| 	informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) | ||||
| 	rm := replicationcontroller.NewReplicationManager( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().ReplicationControllers(), | ||||
| 		clientset, | ||||
| 		replicationcontroller.BurstReplicas, | ||||
| 	) | ||||
| 	go rm.Run(ctx, 3) | ||||
| 	go rm.Run(tCtx, 3) | ||||
|  | ||||
| 	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources | ||||
| 	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource) | ||||
| @@ -336,16 +334,16 @@ plugins: | ||||
| 		InformersStarted:          informersStarted, | ||||
| 		Registry:                  generic.NewRegistry(qc.Evaluators()), | ||||
| 	} | ||||
| 	resourceQuotaController, err := resourcequotacontroller.NewController(ctx, resourceQuotaControllerOptions) | ||||
| 	resourceQuotaController, err := resourcequotacontroller.NewController(tCtx, resourceQuotaControllerOptions) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected err: %v", err) | ||||
| 	} | ||||
| 	go resourceQuotaController.Run(ctx, 2) | ||||
| 	go resourceQuotaController.Run(tCtx, 2) | ||||
|  | ||||
| 	// Periodically sync the quota controller to detect new resource types | ||||
| 	go resourceQuotaController.Sync(ctx, discoveryFunc, 30*time.Second) | ||||
| 	go resourceQuotaController.Sync(tCtx, discoveryFunc, 30*time.Second) | ||||
|  | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	close(informersStarted) | ||||
|  | ||||
| 	// try to create a pod | ||||
| @@ -363,7 +361,7 @@ plugins: | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	if _, err := clientset.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}); err == nil { | ||||
| 	if _, err := clientset.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err == nil { | ||||
| 		t.Fatalf("expected error for insufficient quota") | ||||
| 	} | ||||
|  | ||||
| @@ -386,7 +384,7 @@ plugins: | ||||
| 	// attempt to create a new pod once the quota is propagated | ||||
| 	err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { | ||||
| 		// retry until we succeed (to allow time for all changes to propagate) | ||||
| 		if _, err := clientset.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}); err == nil { | ||||
| 		if _, err := clientset.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err == nil { | ||||
| 			return true, nil | ||||
| 		} | ||||
| 		return false, nil | ||||
| @@ -419,12 +417,10 @@ plugins: | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	tCtx := ktesting.Init(t) | ||||
|  | ||||
| 	// Set up an API server | ||||
| 	_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ | ||||
| 	_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{ | ||||
| 		ModifyServerRunOptions: func(opts *options.ServerRunOptions) { | ||||
| 			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 			opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"} | ||||
| @@ -441,13 +437,13 @@ plugins: | ||||
|  | ||||
| 	informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) | ||||
| 	rm := replicationcontroller.NewReplicationManager( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().ReplicationControllers(), | ||||
| 		clientset, | ||||
| 		replicationcontroller.BurstReplicas, | ||||
| 	) | ||||
| 	go rm.Run(ctx, 3) | ||||
| 	go rm.Run(tCtx, 3) | ||||
|  | ||||
| 	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources | ||||
| 	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource) | ||||
| @@ -464,16 +460,16 @@ plugins: | ||||
| 		InformersStarted:          informersStarted, | ||||
| 		Registry:                  generic.NewRegistry(qc.Evaluators()), | ||||
| 	} | ||||
| 	resourceQuotaController, err := resourcequotacontroller.NewController(ctx, resourceQuotaControllerOptions) | ||||
| 	resourceQuotaController, err := resourcequotacontroller.NewController(tCtx, resourceQuotaControllerOptions) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected err: %v", err) | ||||
| 	} | ||||
| 	go resourceQuotaController.Run(ctx, 2) | ||||
| 	go resourceQuotaController.Run(tCtx, 2) | ||||
|  | ||||
| 	// Periodically sync the quota controller to detect new resource types | ||||
| 	go resourceQuotaController.Sync(ctx, discoveryFunc, 30*time.Second) | ||||
| 	go resourceQuotaController.Sync(tCtx, discoveryFunc, 30*time.Second) | ||||
|  | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	close(informersStarted) | ||||
|  | ||||
| 	// now create a covering quota | ||||
| @@ -496,14 +492,14 @@ plugins: | ||||
|  | ||||
| 	// Creating the first node port service should succeed | ||||
| 	nodePortService := newService("np-svc", v1.ServiceTypeNodePort, true) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, nodePortService, metav1.CreateOptions{}) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, nodePortService, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("creating first node port Service should not have returned error: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	// Creating the first loadbalancer service should succeed | ||||
| 	lbServiceWithNodePort1 := newService("lb-svc-withnp1", v1.ServiceTypeLoadBalancer, true) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, lbServiceWithNodePort1, metav1.CreateOptions{}) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, lbServiceWithNodePort1, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("creating first loadbalancer Service should not have returned error: %v", err) | ||||
| 	} | ||||
| @@ -522,7 +518,7 @@ plugins: | ||||
|  | ||||
| 	// Creating a loadbalancer Service without node ports should succeed | ||||
| 	lbServiceWithoutNodePort1 := newService("lb-svc-wonp1", v1.ServiceTypeLoadBalancer, false) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, lbServiceWithoutNodePort1, metav1.CreateOptions{}) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, lbServiceWithoutNodePort1, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("creating another loadbalancer Service without node ports should not have returned error: %v", err) | ||||
| 	} | ||||
| @@ -541,7 +537,7 @@ plugins: | ||||
|  | ||||
| 	// Creating a ClusterIP Service should succeed | ||||
| 	clusterIPService1 := newService("clusterip-svc1", v1.ServiceTypeClusterIP, false) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, clusterIPService1, metav1.CreateOptions{}) | ||||
| 	_, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, clusterIPService1, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("creating a cluster IP Service should not have returned error: %v", err) | ||||
| 	} | ||||
|   | ||||
| @@ -40,7 +40,6 @@ import ( | ||||
| 	"k8s.io/client-go/tools/cache" | ||||
| 	"k8s.io/client-go/util/retry" | ||||
| 	featuregatetesting "k8s.io/component-base/featuregate/testing" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	podutil "k8s.io/kubernetes/pkg/api/v1/pod" | ||||
| 	"k8s.io/kubernetes/pkg/apis/core" | ||||
| @@ -48,6 +47,7 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	testutil "k8s.io/kubernetes/test/utils" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	"k8s.io/utils/ptr" | ||||
| ) | ||||
|  | ||||
| @@ -118,7 +118,8 @@ func newMatchingPod(podName, namespace string) *v1.Pod { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) { | ||||
| func rmSetup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) { | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd()) | ||||
|  | ||||
| @@ -129,17 +130,21 @@ func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replicaset.Repli | ||||
| 	} | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "rs-informers")), resyncPeriod) | ||||
| 	logger, _ := ktesting.NewTestContext(t) | ||||
|  | ||||
| 	rm := replicaset.NewReplicaSetController( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		informers.Apps().V1().ReplicaSets(), | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replicaset-controller")), | ||||
| 		replicaset.BurstReplicas, | ||||
| 	) | ||||
|  | ||||
| 	return server.TearDownFn, rm, informers, clientSet | ||||
| 	newTeardown := func() { | ||||
| 		tCtx.Cancel("tearing down controller") | ||||
| 		server.TearDownFn() | ||||
| 	} | ||||
|  | ||||
| 	return tCtx, newTeardown, rm, informers, clientSet | ||||
| } | ||||
|  | ||||
| func rmSimpleSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, clientset.Interface) { | ||||
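The rmSetup change above folds context cancellation into the returned teardown so controllers stop before the test API server goes away. A rough, self-contained sketch of that ordering idea; testServer and setupForTest are illustrative stand-ins, not the real kubeapiservertesting types:

import (
	"context"
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// testServer stands in for *kubeapiservertesting.TestServer in this sketch.
type testServer struct {
	TearDownFn func()
}

func setupForTest(t *testing.T, server testServer) (context.Context, func()) {
	tCtx := ktesting.Init(t)
	teardown := func() {
		tCtx.Cancel("tearing down controller") // stop controllers and informers first
		server.TearDownFn()                    // then shut down the test API server
	}
	return tCtx, teardown
}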
| @@ -426,22 +431,23 @@ func TestAdoption(t *testing.T) { | ||||
| 	} | ||||
| 	for i, tc := range testCases { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			closeFn, rm, informers, clientSet := rmSetup(t) | ||||
| 			tCtx, closeFn, rm, informers, clientSet := rmSetup(t) | ||||
| 			defer closeFn() | ||||
|  | ||||
| 			ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("rs-adoption-%d", i), t) | ||||
| 			defer framework.DeleteNamespaceOrDie(clientSet, ns, t) | ||||
|  | ||||
| 			rsClient := clientSet.AppsV1().ReplicaSets(ns.Name) | ||||
| 			podClient := clientSet.CoreV1().Pods(ns.Name) | ||||
| 			const rsName = "rs" | ||||
| 			rs, err := rsClient.Create(context.TODO(), newRS(rsName, ns.Name, 1), metav1.CreateOptions{}) | ||||
| 			rs, err := rsClient.Create(tCtx, newRS(rsName, ns.Name, 1), metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create replica set: %v", err) | ||||
| 			} | ||||
| 			podName := fmt.Sprintf("pod%d", i) | ||||
| 			pod := newMatchingPod(podName, ns.Name) | ||||
| 			pod.OwnerReferences = tc.existingOwnerReferences(rs) | ||||
| 			_, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) | ||||
| 			_, err = podClient.Create(tCtx, pod, metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create Pod: %v", err) | ||||
| 			} | ||||
| @@ -449,7 +455,7 @@ func TestAdoption(t *testing.T) { | ||||
| 			stopControllers := runControllerAndInformers(t, rm, informers, 1) | ||||
| 			defer stopControllers() | ||||
| 			if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 				updatedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) | ||||
| 				updatedPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{}) | ||||
| 				if err != nil { | ||||
| 					return false, err | ||||
| 				} | ||||
| @@ -497,7 +503,7 @@ func TestRSSelectorImmutability(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestSpecReplicasChange(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -528,7 +534,7 @@ func TestSpecReplicasChange(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) | ||||
| 		newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -539,8 +545,9 @@ func TestSpecReplicasChange(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestDeletingAndFailedPods(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
|  | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| 	stopControllers := runControllerAndInformers(t, rm, informers, 0) | ||||
| @@ -564,7 +571,7 @@ func TestDeletingAndFailedPods(t *testing.T) { | ||||
| 	updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { | ||||
| 		pod.Finalizers = []string{"fake.example.com/blockDeletion"} | ||||
| 	}) | ||||
| 	if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, metav1.DeleteOptions{}); err != nil { | ||||
| 	if err := c.CoreV1().Pods(ns.Name).Delete(tCtx, deletingPod.Name, metav1.DeleteOptions{}); err != nil { | ||||
| 		t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err) | ||||
| 	} | ||||
|  | ||||
| @@ -642,7 +649,7 @@ func TestPodDeletionCost(t *testing.T) { | ||||
| 	for _, tc := range tests { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDeletionCost, tc.enabled)() | ||||
| 			closeFn, rm, informers, c := rmSetup(t) | ||||
| 			_, closeFn, rm, informers, c := rmSetup(t) | ||||
| 			defer closeFn() | ||||
| 			ns := framework.CreateNamespaceOrDie(c, tc.name, t) | ||||
| 			defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -701,7 +708,7 @@ func TestPodDeletionCost(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestOverlappingRSs(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-overlapping-rss", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -725,7 +732,7 @@ func TestOverlappingRSs(t *testing.T) { | ||||
|  | ||||
| 	// Expect both RSs have .status.replicas = .spec.replicas | ||||
| 	for i := 0; i < 2; i++ { | ||||
| 		newRS, err := c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), fmt.Sprintf("rs-%d", i+1), metav1.GetOptions{}) | ||||
| 		newRS, err := c.AppsV1().ReplicaSets(ns.Name).Get(tCtx, fmt.Sprintf("rs-%d", i+1), metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("failed to obtain rs rs-%d: %v", i+1, err) | ||||
| 		} | ||||
| @@ -736,7 +743,7 @@ func TestOverlappingRSs(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-pod-orphaning-and-adoption-when-labels-change", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -765,7 +772,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| 		pod.Labels = newLabelMap | ||||
| 	}) | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) | ||||
| 		newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -780,7 +787,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| 		pod.Labels = labelMap() | ||||
| 	}) | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) | ||||
| 		newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			// If the pod is not found, it means the RS picks the pod for deletion (it is extra) | ||||
| 			// Verify there is only one pod in namespace and it has ControllerRef to the RS | ||||
| @@ -814,7 +821,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestGeneralPodAdoption(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	_, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-general-pod-adoption", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -846,7 +853,7 @@ func TestGeneralPodAdoption(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestReadyAndAvailableReplicas(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-ready-and-available-replicas", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -886,7 +893,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { | ||||
|  | ||||
| 	rsClient := c.AppsV1().ReplicaSets(ns.Name) | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) | ||||
| 		newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -898,7 +905,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestRSScaleSubresource(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	_, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-rs-scale-subresource", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -917,7 +924,7 @@ func TestRSScaleSubresource(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestExtraPodsAdoptionAndDeletion(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	_, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-extra-pods-adoption-and-deletion", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -949,7 +956,7 @@ func TestExtraPodsAdoptionAndDeletion(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestFullyLabeledReplicas(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-fully-labeled-replicas", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -981,7 +988,7 @@ func TestFullyLabeledReplicas(t *testing.T) { | ||||
|  | ||||
| 	// Verify only one pod is fully labeled | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) | ||||
| 		newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -992,7 +999,7 @@ func TestFullyLabeledReplicas(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-default-gc-v1", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -1014,14 +1021,14 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	rsClient := c.AppsV1().ReplicaSets(ns.Name) | ||||
| 	err := rsClient.Delete(context.TODO(), rs.Name, metav1.DeleteOptions{}) | ||||
| 	err := rsClient.Delete(tCtx, rs.Name, metav1.DeleteOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to delete rs: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	// Verify no new finalizer has been added | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{}) | ||||
| 		newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -1047,5 +1054,5 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) { | ||||
| 		rs.Finalizers = finalizers | ||||
| 	}) | ||||
|  | ||||
| 	rsClient.Delete(context.TODO(), rs.Name, metav1.DeleteOptions{}) | ||||
| 	_ = rsClient.Delete(tCtx, rs.Name, metav1.DeleteOptions{}) | ||||
| } | ||||
|   | ||||
| @@ -38,12 +38,12 @@ import ( | ||||
| 	"k8s.io/client-go/tools/cache" | ||||
| 	"k8s.io/client-go/util/retry" | ||||
| 	featuregatetesting "k8s.io/component-base/featuregate/testing" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	podutil "k8s.io/kubernetes/pkg/api/v1/pod" | ||||
| 	"k8s.io/kubernetes/pkg/controller/replication" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| @@ -111,7 +111,8 @@ func newMatchingPod(podName, namespace string) *v1.Pod { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) { | ||||
| func rmSetup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) { | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd()) | ||||
|  | ||||
| @@ -123,16 +124,19 @@ func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replication.Repl | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "rc-informers")), resyncPeriod) | ||||
|  | ||||
| 	logger, _ := ktesting.NewTestContext(t) | ||||
| 	rm := replication.NewReplicationManager( | ||||
| 		logger, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().ReplicationControllers(), | ||||
| 		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replication-controller")), | ||||
| 		replication.BurstReplicas, | ||||
| 	) | ||||
| 	newTeardown := func() { | ||||
| 		tCtx.Cancel("tearing down controller") | ||||
| 		server.TearDownFn() | ||||
| 	} | ||||
|  | ||||
| 	return server.TearDownFn, rm, informers, clientSet | ||||
| 	return tCtx, newTeardown, rm, informers, clientSet | ||||
| } | ||||
|  | ||||
| // Run RC controller and informers | ||||
| @@ -414,7 +418,7 @@ func TestAdoption(t *testing.T) { | ||||
| 	} | ||||
| 	for i, tc := range testCases { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			closeFn, rm, informers, clientSet := rmSetup(t) | ||||
| 			tCtx, closeFn, rm, informers, clientSet := rmSetup(t) | ||||
| 			defer closeFn() | ||||
| 			ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("rc-adoption-%d", i), t) | ||||
| 			defer framework.DeleteNamespaceOrDie(clientSet, ns, t) | ||||
| @@ -422,14 +426,14 @@ func TestAdoption(t *testing.T) { | ||||
| 			rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name) | ||||
| 			podClient := clientSet.CoreV1().Pods(ns.Name) | ||||
| 			const rcName = "rc" | ||||
| 			rc, err := rcClient.Create(context.TODO(), newRC(rcName, ns.Name, 1), metav1.CreateOptions{}) | ||||
| 			rc, err := rcClient.Create(tCtx, newRC(rcName, ns.Name, 1), metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create replication controllers: %v", err) | ||||
| 			} | ||||
| 			podName := fmt.Sprintf("pod%d", i) | ||||
| 			pod := newMatchingPod(podName, ns.Name) | ||||
| 			pod.OwnerReferences = tc.existingOwnerReferences(rc) | ||||
| 			_, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{}) | ||||
| 			_, err = podClient.Create(tCtx, pod, metav1.CreateOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create Pod: %v", err) | ||||
| 			} | ||||
| @@ -437,7 +441,7 @@ func TestAdoption(t *testing.T) { | ||||
| 			stopControllers := runControllerAndInformers(t, rm, informers, 1) | ||||
| 			defer stopControllers() | ||||
| 			if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 				updatedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) | ||||
| 				updatedPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{}) | ||||
| 				if err != nil { | ||||
| 					return false, err | ||||
| 				} | ||||
| @@ -457,7 +461,7 @@ func TestAdoption(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestSpecReplicasChange(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -488,7 +492,7 @@ func TestSpecReplicasChange(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) | ||||
| 		newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -500,7 +504,7 @@ func TestSpecReplicasChange(t *testing.T) { | ||||
|  | ||||
| func TestLogarithmicScaleDown(t *testing.T) { | ||||
| 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LogarithmicScaleDown, true)() | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -513,7 +517,7 @@ func TestLogarithmicScaleDown(t *testing.T) { | ||||
| 	waitRCStable(t, c, rc) | ||||
|  | ||||
| 	// get list of pods in the cluster | ||||
| 	pods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), metav1.ListOptions{}) | ||||
| 	pods, err := c.CoreV1().Pods(ns.Name).List(tCtx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("failed to get pods in namespace %s: %+v", ns.Name, err) | ||||
| 	} | ||||
| @@ -526,7 +530,7 @@ func TestLogarithmicScaleDown(t *testing.T) { | ||||
| 	// (meaning the 3rd one was deleted) | ||||
| 	scaleRC(t, c, rc, 2) | ||||
|  | ||||
| 	newPods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), metav1.ListOptions{}) | ||||
| 	newPods, err := c.CoreV1().Pods(ns.Name).List(tCtx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("failed to get pods in namespace %s: %+v", ns.Name, err) | ||||
| 	} | ||||
| @@ -537,7 +541,7 @@ func TestLogarithmicScaleDown(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestDeletingAndFailedPods(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -562,7 +566,7 @@ func TestDeletingAndFailedPods(t *testing.T) { | ||||
| 	updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) { | ||||
| 		pod.Finalizers = []string{"fake.example.com/blockDeletion"} | ||||
| 	}) | ||||
| 	if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, metav1.DeleteOptions{}); err != nil { | ||||
| 	if err := c.CoreV1().Pods(ns.Name).Delete(tCtx, deletingPod.Name, metav1.DeleteOptions{}); err != nil { | ||||
| 		t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err) | ||||
| 	} | ||||
|  | ||||
| @@ -602,7 +606,7 @@ func TestDeletingAndFailedPods(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestOverlappingRCs(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-overlapping-rcs", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -626,7 +630,7 @@ func TestOverlappingRCs(t *testing.T) { | ||||
|  | ||||
| 	// Expect both RCs have .status.replicas = .spec.replicas | ||||
| 	for i := 0; i < 2; i++ { | ||||
| 		newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(context.TODO(), fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{}) | ||||
| 		newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(tCtx, fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("failed to obtain rc rc-%d: %v", i+1, err) | ||||
| 		} | ||||
| @@ -637,7 +641,7 @@ func TestOverlappingRCs(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-pod-orphaning-and-adoption-when-labels-change", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -666,7 +670,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| 		pod.Labels = newLabelMap | ||||
| 	}) | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) | ||||
| 		newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -681,7 +685,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| 		pod.Labels = labelMap() | ||||
| 	}) | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{}) | ||||
| 		newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			// If the pod is not found, it means the RC picks the pod for deletion (it is extra) | ||||
| 			// Verify there is only one pod in namespace and it has ControllerRef to the RC | ||||
| @@ -715,7 +719,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestGeneralPodAdoption(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	_, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-general-pod-adoption", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -747,7 +751,7 @@ func TestGeneralPodAdoption(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestReadyAndAvailableReplicas(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-ready-and-available-replicas", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -787,7 +791,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { | ||||
|  | ||||
| 	rcClient := c.CoreV1().ReplicationControllers(ns.Name) | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) | ||||
| 		newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -799,7 +803,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestRCScaleSubresource(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	_, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-rc-scale-subresource", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -818,7 +822,7 @@ func TestRCScaleSubresource(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestExtraPodsAdoptionAndDeletion(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	_, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-extra-pods-adoption-and-deletion", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -850,7 +854,7 @@ func TestExtraPodsAdoptionAndDeletion(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestFullyLabeledReplicas(t *testing.T) { | ||||
| 	closeFn, rm, informers, c := rmSetup(t) | ||||
| 	tCtx, closeFn, rm, informers, c := rmSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-fully-labeled-replicas", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| @@ -882,7 +886,7 @@ func TestFullyLabeledReplicas(t *testing.T) { | ||||
|  | ||||
| 	// Verify only one pod is fully labeled | ||||
| 	if err := wait.PollImmediate(interval, timeout, func() (bool, error) { | ||||
| 		newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{}) | ||||
| 		newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
|   | ||||
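The replica set and replication controller test setups above all follow the same migration: a per-test context from ktesting.Init replaces hand-rolled context plumbing, and the returned teardown cancels that context (with a human-readable reason) before the test apiserver is torn down. A minimal sketch of that setup shape, assuming k8s.io/kubernetes/test/utils/ktesting exposes Init and a TContext that satisfies context.Context as used above; exampleSetup is a hypothetical helper, not part of this change:

	package example

	import (
		"testing"

		"k8s.io/kubernetes/test/utils/ktesting"
	)

	// exampleSetup returns a test-scoped context plus a teardown that cancels it
	// with a reason; the real setups additionally stop the test apiserver here.
	func exampleSetup(t *testing.T) (ktesting.TContext, func()) {
		tCtx := ktesting.Init(t)
		teardown := func() {
			// Cancel first so controllers driven by tCtx stop before the server goes away.
			tCtx.Cancel("tearing down controller")
		}
		return tCtx, teardown
	}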
| @@ -19,7 +19,6 @@ package taint | ||||
| // This file tests the Taint feature. | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| @@ -85,7 +84,7 @@ func TestTaintNodeByCondition(t *testing.T) { | ||||
|  | ||||
| 	// Start NodeLifecycleController for taint. | ||||
| 	nc, err := nodelifecycle.NewNodeLifecycleController( | ||||
| 		context.TODO(), | ||||
| 		testCtx.Ctx, | ||||
| 		externalInformers.Coordination().V1().Leases(), | ||||
| 		externalInformers.Core().V1().Pods(), | ||||
| 		externalInformers.Core().V1().Nodes(), | ||||
|   | ||||
| @@ -36,6 +36,7 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| // TestMigrateServiceCIDR validates the steps necessary to migrate a cluster default ServiceCIDR | ||||
| @@ -49,8 +50,7 @@ import ( | ||||
| // 7. delete the kubernetes.default service, the new apiserver will recreate it within the new ServiceCIDR | ||||
| func TestMigrateServiceCIDR(t *testing.T) { | ||||
| 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)() | ||||
| 	ctx, cancelFn := context.WithCancel(context.Background()) | ||||
| 	defer cancelFn() | ||||
| 	tCtx := ktesting.Init(t) | ||||
|  | ||||
| 	cidr1 := "192.168.0.0/29" | ||||
| 	cidr2 := "10.168.0.0/24" | ||||
| @@ -78,11 +78,12 @@ func TestMigrateServiceCIDR(t *testing.T) { | ||||
| 	informers1 := informers.NewSharedInformerFactory(client1, resyncPeriod) | ||||
| 	// ServiceCIDR controller | ||||
| 	go servicecidrs.NewController( | ||||
| 		tCtx, | ||||
| 		informers1.Networking().V1alpha1().ServiceCIDRs(), | ||||
| 		informers1.Networking().V1alpha1().IPAddresses(), | ||||
| 		client1, | ||||
| 	).Run(ctx, 5) | ||||
| 	informers1.Start(ctx.Done()) | ||||
| 	).Run(tCtx, 5) | ||||
| 	informers1.Start(tCtx.Done()) | ||||
|  | ||||
| 	// the default serviceCIDR should have a finalizer and ready condition set to true | ||||
| 	if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) { | ||||
| @@ -203,19 +204,20 @@ func TestMigrateServiceCIDR(t *testing.T) { | ||||
| 	defer framework.DeleteNamespaceOrDie(client2, ns, t) | ||||
|  | ||||
| 	// switch the controller to the new apiserver | ||||
| 	cancelFn() | ||||
| 	tCtx.Cancel("tearing down ServiceCIDR controller 1") | ||||
| 	s1.TearDownFn() | ||||
|  | ||||
| 	// ServiceCIDR controller | ||||
| 	ctx2, cancelFn2 := context.WithCancel(context.Background()) | ||||
| 	defer cancelFn2() | ||||
| 	tCtx2 := ktesting.Init(t) | ||||
| 	defer tCtx2.Cancel("tearing down ServiceCIDR controller 2") | ||||
| 	informers2 := informers.NewSharedInformerFactory(client2, resyncPeriod) | ||||
| 	go servicecidrs.NewController( | ||||
| 		tCtx2, | ||||
| 		informers2.Networking().V1alpha1().ServiceCIDRs(), | ||||
| 		informers2.Networking().V1alpha1().IPAddresses(), | ||||
| 		client2, | ||||
| 	).Run(ctx2, 5) | ||||
| 	informers2.Start(ctx2.Done()) | ||||
| 	).Run(tCtx2, 5) | ||||
| 	informers2.Start(tCtx2.Done()) | ||||
|  | ||||
| 	// delete the kubernetes.default service so the old DefaultServiceCIDR can be deleted | ||||
| 	// and the new apiserver can take over | ||||
|   | ||||
| @@ -65,6 +65,7 @@ func TestServiceAllocNewServiceCIDR(t *testing.T) { | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod) | ||||
| 	go servicecidrs.NewController( | ||||
| 		ctx, | ||||
| 		informerFactory.Networking().V1alpha1().ServiceCIDRs(), | ||||
| 		informerFactory.Networking().V1alpha1().IPAddresses(), | ||||
| 		client, | ||||
| @@ -165,6 +166,7 @@ func TestServiceCIDRDeletion(t *testing.T) { | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod) | ||||
| 	go servicecidrs.NewController( | ||||
| 		ctx, | ||||
| 		informerFactory.Networking().V1alpha1().ServiceCIDRs(), | ||||
| 		informerFactory.Networking().V1alpha1().IPAddresses(), | ||||
| 		client, | ||||
|   | ||||
| @@ -37,13 +37,13 @@ import ( | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	restclient "k8s.io/client-go/rest" | ||||
| 	featuregatetesting "k8s.io/component-base/featuregate/testing" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	podutil "k8s.io/kubernetes/pkg/api/v1/pod" | ||||
| 	"k8s.io/kubernetes/pkg/controller/statefulset" | ||||
| 	"k8s.io/kubernetes/pkg/controlplane" | ||||
| 	"k8s.io/kubernetes/pkg/features" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	"k8s.io/utils/ptr" | ||||
| ) | ||||
|  | ||||
| @@ -126,12 +126,11 @@ func TestVolumeTemplateNoopUpdate(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestSpecReplicasChange(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	closeFn, rm, informers, c := scSetup(ctx, t) | ||||
| 	tCtx, closeFn, rm, informers, c := scSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| 	cancel := runControllerAndInformers(rm, informers) | ||||
| 	cancel := runControllerAndInformers(tCtx, rm, informers) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	createHeadlessService(t, c, newHeadlessService(ns.Name)) | ||||
| @@ -170,12 +169,11 @@ func TestSpecReplicasChange(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestDeletingAndTerminatingPods(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	closeFn, rm, informers, c := scSetup(ctx, t) | ||||
| 	tCtx, closeFn, rm, informers, c := scSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| 	cancel := runControllerAndInformers(rm, informers) | ||||
| 	cancel := runControllerAndInformers(tCtx, rm, informers) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	podCount := 3 | ||||
| @@ -289,12 +287,11 @@ func TestStatefulSetAvailable(t *testing.T) { | ||||
| 	} | ||||
| 	for _, test := range tests { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
| 			_, ctx := ktesting.NewTestContext(t) | ||||
| 			closeFn, rm, informers, c := scSetup(ctx, t) | ||||
| 			tCtx, closeFn, rm, informers, c := scSetup(t) | ||||
| 			defer closeFn() | ||||
| 			ns := framework.CreateNamespaceOrDie(c, "test-available-pods", t) | ||||
| 			defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| 			cancel := runControllerAndInformers(rm, informers) | ||||
| 			cancel := runControllerAndInformers(tCtx, rm, informers) | ||||
| 			defer cancel() | ||||
|  | ||||
| 			labelMap := labelMap() | ||||
| @@ -380,12 +377,9 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1 | ||||
|  | ||||
| // add for issue: https://github.com/kubernetes/kubernetes/issues/108837 | ||||
| func TestStatefulSetStatusWithPodFail(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	limitedPodNumber := 2 | ||||
| 	c, config, closeFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ | ||||
| 	c, config, closeFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{ | ||||
| 		ModifyServerConfig: func(config *controlplane.Config) { | ||||
| 			config.GenericConfig.AdmissionControl = &fakePodFailAdmission{ | ||||
| 				limitedPodNumber: limitedPodNumber, | ||||
| @@ -393,11 +387,11 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { | ||||
| 		}, | ||||
| 	}) | ||||
| 	defer closeFn() | ||||
|  | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	resyncPeriod := 12 * time.Hour | ||||
| 	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod) | ||||
| 	ssc := statefulset.NewStatefulSetController( | ||||
| 		ctx, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Apps().V1().StatefulSets(), | ||||
| 		informers.Core().V1().PersistentVolumeClaims(), | ||||
| @@ -408,11 +402,11 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-pod-fail", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
|  | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go ssc.Run(ctx, 5) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go ssc.Run(tCtx, 5) | ||||
|  | ||||
| 	sts := newSTS("sts", ns.Name, 4) | ||||
| 	_, err := c.AppsV1().StatefulSets(sts.Namespace).Create(ctx, sts, metav1.CreateOptions{}) | ||||
| 	_, err := c.AppsV1().StatefulSets(sts.Namespace).Create(tCtx, sts, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Could not create statefulSet %s: %v", sts.Name, err) | ||||
| 	} | ||||
| @@ -420,7 +414,7 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) { | ||||
| 	wantReplicas := limitedPodNumber | ||||
| 	var gotReplicas int32 | ||||
| 	if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { | ||||
| 		newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(ctx, sts.Name, metav1.GetOptions{}) | ||||
| 		newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(tCtx, sts.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| @@ -477,10 +471,10 @@ func TestAutodeleteOwnerRefs(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)() | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	closeFn, rm, informers, c := scSetup(ctx, t) | ||||
|  | ||||
| 	tCtx, closeFn, rm, informers, c := scSetup(t) | ||||
| 	defer closeFn() | ||||
| 	cancel := runControllerAndInformers(rm, informers) | ||||
| 	cancel := runControllerAndInformers(tCtx, rm, informers) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	for _, test := range tests { | ||||
| @@ -521,12 +515,11 @@ func TestAutodeleteOwnerRefs(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestDeletingPodForRollingUpdatePartition(t *testing.T) { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	closeFn, rm, informers, c := scSetup(ctx, t) | ||||
| 	tCtx, closeFn, rm, informers, c := scSetup(t) | ||||
| 	defer closeFn() | ||||
| 	ns := framework.CreateNamespaceOrDie(c, "test-deleting-pod-for-rolling-update-partition", t) | ||||
| 	defer framework.DeleteNamespaceOrDie(c, ns, t) | ||||
| 	cancel := runControllerAndInformers(rm, informers) | ||||
| 	cancel := runControllerAndInformers(tCtx, rm, informers) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	labelMap := labelMap() | ||||
| @@ -570,7 +563,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) { | ||||
| 	}) | ||||
|  | ||||
| 	// Wait for pod-1 to be recreated, while pod-0 remains running | ||||
| 	if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 	if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 		ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| @@ -599,7 +592,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	// Wait for pod-0 to become not ready | ||||
| 	if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 	if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 		ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| @@ -615,7 +608,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) { | ||||
| 	}) | ||||
|  | ||||
| 	// Wait for pod-0 to be recreated, then mark it running | ||||
| 	if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 	if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 		pods := getPods(t, podClient, labelMap) | ||||
| 		recreatedPods := v1.PodList{} | ||||
| 		for _, pod := range pods.Items { | ||||
| @@ -630,7 +623,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	// Wait for the StatefulSet status to record all replicas as ready | ||||
| 	if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 	if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { | ||||
| 		ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| @@ -702,10 +695,9 @@ func TestStatefulSetStartOrdinal(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetStartOrdinal, true)() | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	closeFn, rm, informers, c := scSetup(ctx, t) | ||||
| 	tCtx, closeFn, rm, informers, c := scSetup(t) | ||||
| 	defer closeFn() | ||||
| 	cancel := runControllerAndInformers(rm, informers) | ||||
| 	cancel := runControllerAndInformers(tCtx, rm, informers) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	for _, test := range tests { | ||||
|   | ||||
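The statefulset tests above pass the test context straight into polling via wait.PollUntilContextTimeout(tCtx, ...), so a poll stops as soon as the test is torn down rather than running until its own timeout. A minimal, self-contained sketch of that shape, assuming the wait package from k8s.io/apimachinery and a typed apps/v1 client; waitForReadyReplicas is a hypothetical helper, not part of this change:

	package example

	import (
		"context"
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/util/wait"
		appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1"
	)

	// waitForReadyReplicas polls until the StatefulSet reports the wanted number of
	// ready replicas, or the surrounding test context is cancelled.
	func waitForReadyReplicas(tCtx context.Context, stsClient appsv1client.StatefulSetInterface, name string, want int32) error {
		return wait.PollUntilContextTimeout(tCtx, time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
			ss, err := stsClient.Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			return ss.Status.ReadyReplicas == want, nil
		})
	}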
| @@ -19,6 +19,7 @@ package statefulset | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| 	"sync" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| @@ -161,7 +162,8 @@ func newStatefulSetPVC(name string) v1.PersistentVolumeClaim { | ||||
| } | ||||
|  | ||||
| // scSetup sets up necessities for Statefulset integration test, including control plane, apiserver, informers, and clientset | ||||
| func scSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFunc, *statefulset.StatefulSetController, informers.SharedInformerFactory, clientset.Interface) { | ||||
| func scSetup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *statefulset.StatefulSetController, informers.SharedInformerFactory, clientset.Interface) { | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd()) | ||||
|  | ||||
| @@ -174,7 +176,7 @@ func scSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFu | ||||
| 	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod) | ||||
|  | ||||
| 	sc := statefulset.NewStatefulSetController( | ||||
| 		ctx, | ||||
| 		tCtx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Apps().V1().StatefulSets(), | ||||
| 		informers.Core().V1().PersistentVolumeClaims(), | ||||
| @@ -182,12 +184,16 @@ func scSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFu | ||||
| 		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-controller")), | ||||
| 	) | ||||
|  | ||||
| 	return server.TearDownFn, sc, informers, clientSet | ||||
| 	teardown := func() { | ||||
| 		tCtx.Cancel("tearing down controller") | ||||
| 		server.TearDownFn() | ||||
| 	} | ||||
| 	return tCtx, teardown, sc, informers, clientSet | ||||
| } | ||||
|  | ||||
| // Run STS controller and informers | ||||
| func runControllerAndInformers(sc *statefulset.StatefulSetController, informers informers.SharedInformerFactory) context.CancelFunc { | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| func runControllerAndInformers(ctx context.Context, sc *statefulset.StatefulSetController, informers informers.SharedInformerFactory) context.CancelFunc { | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go sc.Run(ctx, 5) | ||||
| 	return cancel | ||||
|   | ||||
| @@ -203,6 +203,7 @@ func CreateGCController(ctx context.Context, tb ktesting.TB, restConfig restclie | ||||
| 	alwaysStarted := make(chan struct{}) | ||||
| 	close(alwaysStarted) | ||||
| 	gc, err := garbagecollector.NewGarbageCollector( | ||||
| 		ctx, | ||||
| 		clientSet, | ||||
| 		metadataClient, | ||||
| 		restMapper, | ||||
| @@ -660,7 +661,6 @@ func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond | ||||
| // InitDisruptionController initializes and runs a Disruption Controller to properly | ||||
| // update PodDisruptionBudget objects. | ||||
| func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.DisruptionController { | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	informers := informers.NewSharedInformerFactory(testCtx.ClientSet, 12*time.Hour) | ||||
|  | ||||
| 	discoveryClient := cacheddiscovery.NewMemCacheClient(testCtx.ClientSet.Discovery()) | ||||
| @@ -674,7 +674,7 @@ func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.Di | ||||
| 	} | ||||
|  | ||||
| 	dc := disruption.NewDisruptionController( | ||||
| 		ctx, | ||||
| 		testCtx.Ctx, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Policy().V1().PodDisruptionBudgets(), | ||||
| 		informers.Core().V1().ReplicationControllers(), | ||||
|   | ||||
| @@ -31,7 +31,6 @@ import ( | ||||
| 	restclient "k8s.io/client-go/rest" | ||||
| 	"k8s.io/client-go/tools/cache" | ||||
| 	fakecloud "k8s.io/cloud-provider/fake" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" | ||||
| 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach" | ||||
| 	volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" | ||||
| @@ -41,6 +40,7 @@ import ( | ||||
| 	volumetest "k8s.io/kubernetes/pkg/volume/testing" | ||||
| 	"k8s.io/kubernetes/pkg/volume/util" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
| ) | ||||
|  | ||||
| func fakePodWithVol(namespace string) *v1.Pod { | ||||
| @@ -156,36 +156,35 @@ func TestPodDeletionWithDswp(t *testing.T) { | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
|  | ||||
| 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t) | ||||
| 	defer framework.DeleteNamespaceOrDie(testClient, ns, t) | ||||
|  | ||||
| 	pod := fakePodWithVol(namespaceName) | ||||
|  | ||||
| 	if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { | ||||
| 	if _, err := testClient.CoreV1().Nodes().Create(tCtx, node, metav1.CreateOptions{}); err != nil { | ||||
| 		t.Fatalf("Failed to create node: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	// start controller loop | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	go informers.Core().V1().Nodes().Informer().Run(ctx.Done()) | ||||
| 	if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { | ||||
| 	go informers.Core().V1().Nodes().Informer().Run(tCtx.Done()) | ||||
| 	if _, err := testClient.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err != nil { | ||||
| 		t.Errorf("Failed to create pod : %v", err) | ||||
| 	} | ||||
|  | ||||
| 	podInformer := informers.Core().V1().Pods().Informer() | ||||
| 	go podInformer.Run(ctx.Done()) | ||||
| 	go podInformer.Run(tCtx.Done()) | ||||
|  | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) | ||||
| 	initCSIObjects(ctx.Done(), informers) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done()) | ||||
| 	initCSIObjects(tCtx.Done(), informers) | ||||
| 	go ctrl.Run(tCtx) | ||||
| 	// Run pvCtrl to avoid leaking goroutines started during its creation. | ||||
| 	go pvCtrl.Run(ctx) | ||||
| 	go pvCtrl.Run(tCtx) | ||||
|  | ||||
| 	waitToObservePods(t, podInformer, 1) | ||||
| 	podKey, err := cache.MetaNamespaceKeyFunc(pod) | ||||
| @@ -231,7 +230,9 @@ func TestPodUpdateWithWithADC(t *testing.T) { | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
|  | ||||
| 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t) | ||||
| 	defer framework.DeleteNamespaceOrDie(testClient, ns, t) | ||||
| @@ -254,16 +255,13 @@ func TestPodUpdateWithWithADC(t *testing.T) { | ||||
| 	go podInformer.Run(podStopCh) | ||||
|  | ||||
| 	// start controller loop | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) | ||||
| 	initCSIObjects(ctx.Done(), informers) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done()) | ||||
| 	initCSIObjects(tCtx.Done(), informers) | ||||
| 	go ctrl.Run(tCtx) | ||||
| 	// Run pvCtrl to avoid leaking goroutines started during its creation. | ||||
| 	go pvCtrl.Run(ctx) | ||||
| 	go pvCtrl.Run(tCtx) | ||||
|  | ||||
| 	waitToObservePods(t, podInformer, 1) | ||||
| 	podKey, err := cache.MetaNamespaceKeyFunc(pod) | ||||
| @@ -304,7 +302,9 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
|  | ||||
| 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t) | ||||
| 	defer framework.DeleteNamespaceOrDie(testClient, ns, t) | ||||
| @@ -327,16 +327,13 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { | ||||
| 	go podInformer.Run(podStopCh) | ||||
|  | ||||
| 	// start controller loop | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) | ||||
| 	initCSIObjects(ctx.Done(), informers) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done()) | ||||
| 	initCSIObjects(tCtx.Done(), informers) | ||||
| 	go ctrl.Run(tCtx) | ||||
| 	// Run pvCtrl to avoid leaking goroutines started during its creation. | ||||
| 	go pvCtrl.Run(ctx) | ||||
| 	go pvCtrl.Run(tCtx) | ||||
|  | ||||
| 	waitToObservePods(t, podInformer, 1) | ||||
| 	podKey, err := cache.MetaNamespaceKeyFunc(pod) | ||||
| @@ -402,7 +399,7 @@ func waitForPodFuncInDSWP(t *testing.T, dswp volumecache.DesiredStateOfWorld, ch | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, syncPeriod time.Duration, timers attachdetach.TimerConfig) (*clientset.Clientset, attachdetach.AttachDetachController, *persistentvolume.PersistentVolumeController, clientgoinformers.SharedInformerFactory) { | ||||
| func createAdClients(ctx context.Context, t *testing.T, server *kubeapiservertesting.TestServer, syncPeriod time.Duration, timers attachdetach.TimerConfig) (*clientset.Clientset, attachdetach.AttachDetachController, *persistentvolume.PersistentVolumeController, clientgoinformers.SharedInformerFactory) { | ||||
| 	config := restclient.CopyConfig(server.ClientConfig) | ||||
| 	config.QPS = 1000000 | ||||
| 	config.Burst = 1000000 | ||||
| @@ -425,9 +422,8 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync | ||||
| 	plugins := []volume.VolumePlugin{plugin} | ||||
| 	cloud := &fakecloud.Cloud{} | ||||
| 	informers := clientgoinformers.NewSharedInformerFactory(testClient, resyncPeriod) | ||||
| 	logger, ctx := ktesting.NewTestContext(t) | ||||
| 	ctrl, err := attachdetach.NewAttachDetachController( | ||||
| 		logger, | ||||
| 		ctx, | ||||
| 		testClient, | ||||
| 		informers.Core().V1().Pods(), | ||||
| 		informers.Core().V1().Nodes(), | ||||
| @@ -488,7 +484,10 @@ func TestPodAddedByDswp(t *testing.T) { | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig) | ||||
|  | ||||
| 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t) | ||||
| 	defer framework.DeleteNamespaceOrDie(testClient, ns, t) | ||||
| @@ -496,13 +495,13 @@ func TestPodAddedByDswp(t *testing.T) { | ||||
| 	pod := fakePodWithVol(namespaceName) | ||||
| 	podStopCh := make(chan struct{}) | ||||
|  | ||||
| 	if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { | ||||
| 	if _, err := testClient.CoreV1().Nodes().Create(tCtx, node, metav1.CreateOptions{}); err != nil { | ||||
| 		t.Fatalf("Failed to create node: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	go informers.Core().V1().Nodes().Informer().Run(podStopCh) | ||||
|  | ||||
| 	if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil { | ||||
| 	if _, err := testClient.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err != nil { | ||||
| 		t.Errorf("Failed to create pod : %v", err) | ||||
| 	} | ||||
|  | ||||
| @@ -510,17 +509,13 @@ func TestPodAddedByDswp(t *testing.T) { | ||||
| 	go podInformer.Run(podStopCh) | ||||
|  | ||||
| 	// start controller loop | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) | ||||
| 	initCSIObjects(ctx.Done(), informers) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done()) | ||||
| 	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done()) | ||||
| 	initCSIObjects(tCtx.Done(), informers) | ||||
| 	go ctrl.Run(tCtx) | ||||
| 	// Run pvCtrl to avoid leaking goroutines started during its creation. | ||||
| 	go pvCtrl.Run(ctx) | ||||
| 	go pvCtrl.Run(tCtx) | ||||
|  | ||||
| 	waitToObservePods(t, podInformer, 1) | ||||
| 	podKey, err := cache.MetaNamespaceKeyFunc(pod) | ||||
| @@ -556,9 +551,13 @@ func TestPVCBoundWithADC(t *testing.T) { | ||||
| 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. | ||||
| 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd()) | ||||
| 	defer server.TearDownFn() | ||||
|  | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
|  | ||||
| 	namespaceName := "test-pod-deletion" | ||||
|  | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, attachdetach.TimerConfig{ | ||||
| 	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, attachdetach.TimerConfig{ | ||||
| 		ReconcilerLoopPeriod:                        100 * time.Millisecond, | ||||
| 		ReconcilerMaxWaitForUnmountDuration:         6 * time.Second, | ||||
| 		DesiredStateOfWorldPopulatorLoopSleepPeriod: 24 * time.Hour, | ||||
| @@ -601,14 +600,11 @@ func TestPVCBoundWithADC(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	// start controller loop | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	informers.WaitForCacheSync(ctx.Done()) | ||||
| 	initCSIObjects(ctx.Done(), informers) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	go pvCtrl.Run(ctx) | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	informers.WaitForCacheSync(tCtx.Done()) | ||||
| 	initCSIObjects(tCtx.Done(), informers) | ||||
| 	go ctrl.Run(tCtx) | ||||
| 	go pvCtrl.Run(tCtx) | ||||
|  | ||||
| 	waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4) | ||||
| 	// Give attachdetach controller enough time to populate pods into DSWP. | ||||
|   | ||||
| @@ -42,9 +42,9 @@ import ( | ||||
| 	volumetest "k8s.io/kubernetes/pkg/volume/testing" | ||||
| 	"k8s.io/kubernetes/pkg/volume/util" | ||||
| 	"k8s.io/kubernetes/test/integration/framework" | ||||
| 	"k8s.io/kubernetes/test/utils/ktesting" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
| ) | ||||
|  | ||||
| // Several tests in this file are configurable by environment variables: | ||||
| @@ -114,7 +114,10 @@ func TestPersistentVolumeRecycler(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "pv-recycler" | ||||
|  | ||||
| 	testClient, ctrl, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
|  | ||||
| 	testClient, ctrl, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -125,10 +128,8 @@ func TestPersistentVolumeRecycler(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistenceVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go ctrl.Run(tCtx) | ||||
|  | ||||
| 	// This PV will be claimed, released, and recycled. | ||||
| 	pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle) | ||||
| @@ -170,7 +171,9 @@ func TestPersistentVolumeDeleter(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "pv-deleter" | ||||
|  | ||||
| 	testClient, ctrl, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, ctrl, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -181,10 +184,8 @@ func TestPersistentVolumeDeleter(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistenceVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go ctrl.Run(tCtx) | ||||
|  | ||||
| 	// This PV will be claimed, released, and deleted. | ||||
| 	pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete) | ||||
| @@ -231,7 +232,9 @@ func TestPersistentVolumeBindRace(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "pv-bind-race" | ||||
|  | ||||
| 	testClient, ctrl, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, ctrl, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -242,10 +245,8 @@ func TestPersistentVolumeBindRace(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistenceVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go ctrl.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go ctrl.Run(tCtx) | ||||
|  | ||||
| 	pv := createPV("fake-pv-race", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain) | ||||
| 	pvc := createPVC("fake-pvc-race", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "") | ||||
| @@ -302,7 +303,9 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "pvc-label-selector" | ||||
|  | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -313,10 +316,8 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistenceVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go controller.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go controller.Run(tCtx) | ||||
|  | ||||
| 	var ( | ||||
| 		err     error | ||||
| @@ -384,7 +385,9 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "pvc-match-expressions" | ||||
|  | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -395,10 +398,8 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistenceVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go controller.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go controller.Run(tCtx) | ||||
|  | ||||
| 	var ( | ||||
| 		err     error | ||||
| @@ -485,7 +486,9 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "multi-pvs" | ||||
|  | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -496,10 +499,8 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistentVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go controller.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go controller.Run(tCtx) | ||||
|  | ||||
| 	maxPVs := getObjectCount() | ||||
| 	pvs := make([]*v1.PersistentVolume, maxPVs) | ||||
| @@ -576,7 +577,9 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "multi-pvs-pvcs" | ||||
|  | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -587,10 +590,8 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistentVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go binder.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go binder.Run(tCtx) | ||||
|  | ||||
| 	objCount := getObjectCount() | ||||
| 	pvs := make([]*v1.PersistentVolume, objCount) | ||||
| @@ -742,7 +743,9 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { | ||||
| 	const shortSyncPeriod = 2 * time.Second | ||||
| 	syncPeriod := getSyncPeriod(shortSyncPeriod) | ||||
|  | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, shortSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, shortSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -801,10 +804,8 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	// Start the controller when all PVs and PVCs are already saved in etcd | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go binder.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go binder.Run(tCtx) | ||||
|  | ||||
| 	// wait for at least two sync periods for changes. No volume should be | ||||
| 	// Released and no claim should be Lost during this time. | ||||
| @@ -867,7 +868,9 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "provision-multi-pvs" | ||||
|  | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -890,10 +893,8 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { | ||||
| 	} | ||||
| 	testClient.StorageV1().StorageClasses().Create(context.TODO(), &storageClass, metav1.CreateOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go binder.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go binder.Run(tCtx) | ||||
|  | ||||
| 	objCount := getObjectCount() | ||||
| 	pvcs := make([]*v1.PersistentVolumeClaim, objCount) | ||||
| @@ -963,7 +964,9 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { | ||||
| 	defer s.TearDownFn() | ||||
| 	namespaceName := "multi-pvs-diff-access" | ||||
|  | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -974,10 +977,8 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) { | ||||
| 	// non-namespaced objects (PersistentVolumes). | ||||
| 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go controller.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go controller.Run(tCtx) | ||||
|  | ||||
| 	// This PV will be claimed, released, and deleted | ||||
| 	pvRwo := createPV("pv-rwo", "/tmp/foo", "10G", | ||||
| @@ -1048,7 +1049,9 @@ func TestRetroactiveStorageClassAssignment(t *testing.T) { | ||||
| 	defaultStorageClassName := "gold" | ||||
| 	storageClassName := "silver" | ||||
|  | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod) | ||||
| 	tCtx := ktesting.Init(t) | ||||
| 	defer tCtx.Cancel("test has completed") | ||||
| 	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) | ||||
| 	defer watchPV.Stop() | ||||
| 	defer watchPVC.Stop() | ||||
|  | ||||
| @@ -1078,10 +1081,8 @@ func TestRetroactiveStorageClassAssignment(t *testing.T) { | ||||
| 		t.Errorf("Failed to create a storage class: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(context.TODO()) | ||||
| 	informers.Start(ctx.Done()) | ||||
| 	go binder.Run(ctx) | ||||
| 	defer cancel() | ||||
| 	informers.Start(tCtx.Done()) | ||||
| 	go binder.Run(tCtx) | ||||
|  | ||||
| 	klog.V(2).Infof("TestRetroactiveStorageClassAssignment: start") | ||||
|  | ||||
| @@ -1326,7 +1327,7 @@ func waitForPersistentVolumeClaimStorageClass(t *testing.T, claimName, scName st | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func createClients(namespaceName string, t *testing.T, s *kubeapiservertesting.TestServer, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, informers.SharedInformerFactory, watch.Interface, watch.Interface) { | ||||
| func createClients(ctx context.Context, namespaceName string, t *testing.T, s *kubeapiservertesting.TestServer, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, informers.SharedInformerFactory, watch.Interface, watch.Interface) { | ||||
| 	// Use higher QPS and Burst, there is a test for race conditions which | ||||
| 	// creates many objects and default values were too low. | ||||
| 	binderConfig := restclient.CopyConfig(s.ClientConfig) | ||||
| @@ -1354,7 +1355,6 @@ func createClients(namespaceName string, t *testing.T, s *kubeapiservertesting.T | ||||
| 	plugins := []volume.VolumePlugin{plugin} | ||||
| 	cloud := &fakecloud.Cloud{} | ||||
| 	informers := informers.NewSharedInformerFactory(testClient, getSyncPeriod(syncPeriod)) | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctrl, err := persistentvolumecontroller.NewController( | ||||
| 		ctx, | ||||
| 		persistentvolumecontroller.ControllerParameters{ | ||||
|   | ||||
| @@ -27,7 +27,6 @@ import ( | ||||
| 	"time" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/ktesting" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	storagev1 "k8s.io/api/storage/v1" | ||||
| @@ -1121,8 +1120,7 @@ func initPVController(t *testing.T, testCtx *testutil.TestContext, provisionDela | ||||
| 		NodeInformer:              informerFactory.Core().V1().Nodes(), | ||||
| 		EnableDynamicProvisioning: true, | ||||
| 	} | ||||
| 	_, ctx := ktesting.NewTestContext(t) | ||||
| 	ctrl, err := persistentvolume.NewController(ctx, params) | ||||
| 	ctrl, err := persistentvolume.NewController(testCtx.Ctx, params) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
|   | ||||
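The hunks above all apply the same migration: each test now creates a single per-test context with ktesting.Init, cancels it with an explanatory reason on teardown, hands it to helpers such as createClients and initPVController (which no longer construct a context of their own), and uses that same context to start the informers and run the controller. Below is a minimal sketch of that wiring, not part of the change itself: the runnable interface and startForTest helper are illustrative names, and the k8s.io/kubernetes/test/utils/ktesting import path is inferred from the Init/Cancel calls shown above.

```go
package example

import (
	"context"
	"testing"

	"k8s.io/client-go/informers"
	"k8s.io/kubernetes/test/utils/ktesting"
)

// runnable stands in for any controller with a context-aware Run method,
// such as the persistent volume controller started by the tests above.
type runnable interface {
	Run(ctx context.Context)
}

// startForTest shows the wiring the hunks converge on: one test context
// provides both the logger and the cancellation signal, and it drives the
// informers and the controller, so stopping the test stops everything.
func startForTest(t *testing.T, ctrl runnable, factory informers.SharedInformerFactory) {
	tCtx := ktesting.Init(t)
	t.Cleanup(func() { tCtx.Cancel("test has completed") })

	factory.Start(tCtx.Done())
	go ctrl.Run(tCtx)
}
```

Here t.Cleanup plays the role of the defer tCtx.Cancel(...) seen in the test bodies; in a shared helper a plain defer would cancel as soon as the helper returned. Keying everything off one cancellable, logger-carrying context is what allows the earlier context.WithCancel(context.TODO()) blocks and the per-helper ktesting.NewTestContext calls to be deleted.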