kube-controller-manager: readjust log verbosity
- Increase the verbosity of the broadcasters' structured event logging to 3 so that users can suppress event messages by lowering the logging level; this reduces informational noise.
- Make sure the context is properly injected into the broadcaster, so that the -v flag value also applies to that broadcaster rather than the global value above.
- test: use cancellation from ktesting
- golangci-hints: check error return values
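
The pattern applied to each controller below boils down to two changes: construct the event broadcaster with the caller's context so that the contextual logger (and therefore the -v flag) governs its output, and start its structured logging at verbosity 3 instead of 0. A minimal sketch of that wiring, not taken verbatim from any one controller (the component name and helper name here are illustrative):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newEventPipeline wires the context into the broadcaster and raises the
// verbosity of its structured logging so per-event log lines only show up
// at -v=3 and above.
func newEventPipeline(ctx context.Context, client kubernetes.Interface) (record.EventRecorder, func()) {
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	broadcaster.StartStructuredLogging(3)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	return recorder, broadcaster.Shutdown
}

With this in place, running at -v=2 or lower hides the per-event lines that previously appeared regardless of the flag, while -v=3 restores them.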
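On the test side, ktesting supplies a context that is tied to the test's lifetime, which replaces the hand-rolled stop channels and lets the sync calls' errors be checked. A rough sketch of the pattern used in the endpoints tests below (the controller type is a stand-in, not the real one):

package example

import (
	"context"
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// fakeController stands in for the controller under test; only the
// context-aware signatures matter for this sketch.
type fakeController struct{}

func (c *fakeController) Run(ctx context.Context, workers int) { <-ctx.Done() }

func (c *fakeController) syncService(ctx context.Context, key string) error { return nil }

func TestSyncPattern(t *testing.T) {
	// ktesting.Init returns a context bound to the test, so there is no
	// stopCh to create and close; background goroutines stop when the test ends.
	tCtx := ktesting.Init(t)

	c := &fakeController{}
	go c.Run(tCtx, 1)

	// Checking the error satisfies the golangci "checked error return value" hint.
	if err := c.syncService(tCtx, "default/foo"); err != nil {
		t.Errorf("Unexpected error syncing service %v", err)
	}
}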
@@ -26,7 +26,6 @@ import (
 
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/controller-manager/controller"
-	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/cmd/kube-controller-manager/names"
 	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/pkg/controller/deployment"
@@ -87,7 +86,7 @@ func newReplicaSetControllerDescriptor() *ControllerDescriptor {
 
 func startReplicaSetController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
 	go replicaset.NewReplicaSetController(
-		klog.FromContext(ctx),
+		ctx,
 		controllerContext.InformerFactory.Apps().V1().ReplicaSets(),
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.ClientBuilder.ClientOrDie("replicaset-controller"),
@@ -79,6 +79,7 @@ func startHPAControllerWithMetricsClient(ctx context.Context, controllerContext
 	}
 
 	go podautoscaler.NewHorizontalController(
+		ctx,
 		hpaClient.CoreV1(),
 		scaleClient,
 		hpaClient.AutoscalingV2(),
@@ -375,7 +375,7 @@ func startPersistentVolumeAttachDetachController(ctx context.Context, controller
 	ctx = klog.NewContext(ctx, logger)
 	attachDetachController, attachDetachControllerErr :=
 		attachdetach.NewAttachDetachController(
-			logger,
+			ctx,
 			controllerContext.ClientBuilder.ClientOrDie("attachdetach-controller"),
 			controllerContext.InformerFactory.Core().V1().Pods(),
 			controllerContext.InformerFactory.Core().V1().Nodes(),
@@ -416,6 +416,7 @@ func startPersistentVolumeExpanderController(ctx context.Context, controllerCont
 	csiTranslator := csitrans.New()
 
 	expandController, expandControllerErr := expand.NewExpandController(
+		ctx,
 		controllerContext.ClientBuilder.ClientOrDie("expand-controller"),
 		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
 		controllerContext.Cloud,
@@ -441,6 +442,7 @@ func newEphemeralVolumeControllerDescriptor() *ControllerDescriptor {
 
 func startEphemeralVolumeController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
 	ephemeralController, err := ephemeral.NewController(
+		ctx,
 		controllerContext.ClientBuilder.ClientOrDie("ephemeral-volume-controller"),
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims())
@@ -489,6 +491,7 @@ func newEndpointsControllerDescriptor() *ControllerDescriptor {
 
 func startEndpointsController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
 	go endpointcontroller.NewEndpointController(
+		ctx,
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.InformerFactory.Core().V1().Services(),
 		controllerContext.InformerFactory.Core().V1().Endpoints(),
@@ -508,7 +511,7 @@ func newReplicationControllerDescriptor() *ControllerDescriptor {
 
 func startReplicationController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
 	go replicationcontroller.NewReplicationManager(
-		klog.FromContext(ctx),
+		ctx,
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.InformerFactory.Core().V1().ReplicationControllers(),
 		controllerContext.ClientBuilder.ClientOrDie("replication-controller"),
@@ -686,6 +689,7 @@ func startGarbageCollectorController(ctx context.Context, controllerContext Cont
 		ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{}
 	}
 	garbageCollector, err := garbagecollector.NewGarbageCollector(
+		ctx,
 		gcClientset,
 		metadataClient,
 		controllerContext.RESTMapper,
@@ -39,6 +39,7 @@ func newServiceCIDRsControllerDescriptor() *ControllerDescriptor {
 }
 func startServiceCIDRsController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
 	go servicecidrs.NewController(
+		ctx,
 		controllerContext.InformerFactory.Networking().V1alpha1().ServiceCIDRs(),
 		controllerContext.InformerFactory.Networking().V1alpha1().IPAddresses(),
 		controllerContext.ClientBuilder.ClientOrDie("service-cidrs-controller"),
@@ -82,7 +82,7 @@ type ControllerV2 struct {
 // NewControllerV2 creates and initializes a new Controller.
 func NewControllerV2(ctx context.Context, jobInformer batchv1informers.JobInformer, cronJobsInformer batchv1informers.CronJobInformer, kubeClient clientset.Interface) (*ControllerV2, error) {
 	logger := klog.FromContext(ctx)
-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 
 	jm := &ControllerV2{
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cronjob"),
@@ -129,7 +129,7 @@ func (jm *ControllerV2) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 
 	// Start event processing pipeline.
-	jm.broadcaster.StartStructuredLogging(0)
+	jm.broadcaster.StartStructuredLogging(3)
 	jm.broadcaster.StartRecordingToSink(&covev1client.EventSinkImpl{Interface: jm.kubeClient.CoreV1().Events("")})
 	defer jm.broadcaster.Shutdown()
 
@@ -138,7 +138,7 @@ func NewDaemonSetsController(
 	kubeClient clientset.Interface,
 	failedPodsBackoff *flowcontrol.Backoff,
 ) (*DaemonSetsController, error) {
-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	logger := klog.FromContext(ctx)
 	dsc := &DaemonSetsController{
 		kubeClient: kubeClient,
@@ -279,7 +279,7 @@ func (dsc *DaemonSetsController) deleteDaemonset(logger klog.Logger, obj interfa
 func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 
-	dsc.eventBroadcaster.StartStructuredLogging(0)
+	dsc.eventBroadcaster.StartStructuredLogging(3)
 	dsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dsc.kubeClient.CoreV1().Events("")})
 	defer dsc.eventBroadcaster.Shutdown()
 
@@ -99,7 +99,7 @@ type DeploymentController struct {
 
 // NewDeploymentController creates a new DeploymentController.
 func NewDeploymentController(ctx context.Context, dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	logger := klog.FromContext(ctx)
 	dc := &DeploymentController{
 		client: client,
@@ -158,7 +158,7 @@ func (dc *DeploymentController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 
 	// Start events processing pipeline.
-	dc.eventBroadcaster.StartStructuredLogging(0)
+	dc.eventBroadcaster.StartStructuredLogging(3)
 	dc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.client.CoreV1().Events("")})
 	defer dc.eventBroadcaster.Shutdown()
 
@@ -181,7 +181,7 @@ func NewDisruptionControllerInternal(ctx context.Context,
 		queue: workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "disruption"), workqueue.DefaultControllerRateLimiter()),
 		recheckQueue: workqueue.NewDelayingQueueWithCustomClock(clock, "disruption_recheck"),
 		stalePodDisruptionQueue: workqueue.NewRateLimitingQueueWithDelayingInterface(workqueue.NewDelayingQueueWithCustomClock(clock, "stale_pod_disruption"), workqueue.DefaultControllerRateLimiter()),
-		broadcaster: record.NewBroadcaster(),
+		broadcaster: record.NewBroadcaster(record.WithContext(ctx)),
 		stalePodDisruptionTimeout: stalePodDisruptionTimeout,
 	}
 	dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})
@@ -70,9 +70,9 @@ const (
 )
 
 // NewEndpointController returns a new *Controller.
-func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
+func NewEndpointController(ctx context.Context, podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
 	endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface, endpointUpdatesBatchPeriod time.Duration) *Controller {
-	broadcaster := record.NewBroadcaster()
+	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-controller"})
 
 	e := &Controller{
@@ -164,7 +164,7 @@ func (e *Controller) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 
 	// Start events processing pipeline.
-	e.eventBroadcaster.StartStructuredLogging(0)
+	e.eventBroadcaster.StartStructuredLogging(3)
 	e.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: e.client.CoreV1().Events("")})
 	defer e.eventBroadcaster.Shutdown()
 
@@ -43,6 +43,7 @@ import (
 	endptspkg "k8s.io/kubernetes/pkg/api/v1/endpoints"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	controllerpkg "k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	utilnet "k8s.io/utils/net"
 	"k8s.io/utils/pointer"
 )
@@ -209,10 +210,10 @@ type endpointController struct {
 	endpointsStore cache.Store
 }
 
-func newController(url string, batchPeriod time.Duration) *endpointController {
+func newController(ctx context.Context, url string, batchPeriod time.Duration) *endpointController {
 	client := clientset.NewForConfigOrDie(&restclient.Config{Host: url, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
 	informerFactory := informers.NewSharedInformerFactory(client, controllerpkg.NoResyncPeriodFunc())
-	endpoints := NewEndpointController(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Services(),
+	endpoints := NewEndpointController(ctx, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Services(),
 		informerFactory.Core().V1().Endpoints(), client, batchPeriod)
 	endpoints.podsSynced = alwaysReady
 	endpoints.servicesSynced = alwaysReady
@@ -225,11 +226,12 @@ func newController(url string, batchPeriod time.Duration) *endpointController {
 	}
 }
 
-func newFakeController(batchPeriod time.Duration) (*fake.Clientset, *endpointController) {
+func newFakeController(ctx context.Context, batchPeriod time.Duration) (*fake.Clientset, *endpointController) {
 	client := fake.NewSimpleClientset()
 	informerFactory := informers.NewSharedInformerFactory(client, controllerpkg.NoResyncPeriodFunc())
 
 	eController := NewEndpointController(
+		ctx,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Services(),
 		informerFactory.Core().V1().Endpoints(),
@@ -252,7 +254,9 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -268,7 +272,10 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
 		Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -276,7 +283,9 @@ func TestSyncEndpointsExistingNilSubsets(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -292,7 +301,10 @@ func TestSyncEndpointsExistingNilSubsets(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -300,7 +312,9 @@ func TestSyncEndpointsExistingEmptySubsets(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -316,7 +330,10 @@ func TestSyncEndpointsExistingEmptySubsets(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -326,7 +343,8 @@ func TestSyncEndpointsWithPodResourceVersionUpdateOnly(t *testing.T) {
 	defer testServer.Close()
 	pod0 := testPod(ns, 0, 1, true, ipv4only)
 	pod1 := testPod(ns, 1, 1, false, ipv4only)
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -362,7 +380,11 @@ func TestSyncEndpointsWithPodResourceVersionUpdateOnly(t *testing.T) {
 	pod1.ResourceVersion = "4"
 	endpoints.podStore.Add(pod0)
 	endpoints.podStore.Add(pod1)
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
+
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -370,7 +392,8 @@ func TestSyncEndpointsNewNoSubsets(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.serviceStore.Add(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
 		Spec: v1.ServiceSpec{
@@ -378,7 +401,10 @@ func TestSyncEndpointsNewNoSubsets(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	endpointsHandler.ValidateRequestCount(t, 1)
 }
 
@@ -386,7 +412,9 @@ func TestCheckLeftoverEndpoints(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, _ := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -412,7 +440,8 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -432,7 +461,10 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -456,7 +488,8 @@ func TestSyncEndpointsHeadlessServiceLabel(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -475,7 +508,11 @@ func TestSyncEndpointsHeadlessServiceLabel(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
+
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -534,12 +571,13 @@ func TestSyncServiceExternalNameType(t *testing.T) {
 			testServer, endpointsHandler := makeTestServer(t, namespace)
 
 			defer testServer.Close()
-			endpoints := newController(testServer.URL, 0*time.Second)
+			tCtx := ktesting.Init(t)
+			endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 			err := endpoints.serviceStore.Add(tc.service)
 			if err != nil {
 				t.Fatalf("Error adding service to service store: %v", err)
 			}
-			err = endpoints.syncService(context.TODO(), namespace+"/"+serviceName)
+			err = endpoints.syncService(tCtx, namespace+"/"+serviceName)
 			if err != nil {
 				t.Fatalf("Error syncing service: %v", err)
 			}
@@ -552,7 +590,8 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -572,7 +611,10 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "UDP"}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -596,7 +638,8 @@ func TestSyncEndpointsProtocolSCTP(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -616,7 +659,10 @@ func TestSyncEndpointsProtocolSCTP(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "SCTP"}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -640,7 +686,8 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -657,7 +704,10 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -680,7 +730,9 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -697,7 +749,10 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -720,7 +775,9 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -737,7 +794,10 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -761,7 +821,8 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
 	ns := "bar"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -781,7 +842,10 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -804,7 +868,8 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			ResourceVersion: "1",
@@ -824,7 +889,10 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -832,7 +900,8 @@ func TestSyncEndpointsItems(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	addPods(endpoints.podStore, ns, 3, 2, 0, ipv4only)
 	addPods(endpoints.podStore, "blah", 5, 2, 0, ipv4only) // make sure these aren't found!
 
@@ -846,7 +915,10 @@ func TestSyncEndpointsItems(t *testing.T) {
 			},
 		},
 	})
-	endpoints.syncService(context.TODO(), "other/foo")
+	err := endpoints.syncService(tCtx, "other/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	expectedSubsets := []v1.EndpointSubset{{
 		Addresses: []v1.EndpointAddress{
@@ -877,7 +949,8 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	addPods(endpoints.podStore, ns, 3, 2, 0, ipv4only)
 	serviceLabels := map[string]string{"foo": "bar"}
 	endpoints.serviceStore.Add(&v1.Service{
@@ -894,7 +967,10 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
 			},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	expectedSubsets := []v1.EndpointSubset{{
 		Addresses: []v1.EndpointAddress{
@@ -925,7 +1001,8 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
 	ns := "bar"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -953,7 +1030,10 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	serviceLabels[v1.IsHeadlessService] = ""
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -989,7 +1069,8 @@ func TestWaitsForAllInformersToBeSynced2(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	addPods(endpoints.podStore, ns, 1, 1, 0, ipv4only)
 
 	service := &v1.Service{
@@ -1005,9 +1086,7 @@ func TestWaitsForAllInformersToBeSynced2(t *testing.T) {
 		endpoints.servicesSynced = test.servicesSynced
 		endpoints.endpointsSynced = test.endpointsSynced
 		endpoints.workerLoopPeriod = 10 * time.Millisecond
-		stopCh := make(chan struct{})
-		defer close(stopCh)
-		go endpoints.Run(context.TODO(), 1)
+		go endpoints.Run(tCtx, 1)
 
 		// cache.WaitForNamedCacheSync has a 100ms poll period, and the endpoints worker has a 10ms period.
 		// To ensure we get all updates, including unexpected ones, we need to wait at least as long as
@@ -1030,7 +1109,8 @@ func TestSyncEndpointsHeadlessService(t *testing.T) {
 	ns := "headless"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1053,7 +1133,10 @@ func TestSyncEndpointsHeadlessService(t *testing.T) {
 	}
 	originalService := service.DeepCopy()
 	endpoints.serviceStore.Add(service)
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1080,7 +1163,9 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseFail
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1100,7 +1185,10 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseFail
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1119,7 +1207,9 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseSucc
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1139,7 +1229,10 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseSucc
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1158,7 +1251,9 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyOnFailureAndPhase
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1178,7 +1273,11 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyOnFailureAndPhase
 			Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt32(8080)}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
+
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1197,7 +1296,8 @@ func TestSyncEndpointsHeadlessWithoutPort(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.serviceStore.Add(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
 		Spec: v1.ServiceSpec{
@@ -1207,7 +1307,11 @@ func TestSyncEndpointsHeadlessWithoutPort(t *testing.T) {
 		},
 	})
 	addPods(endpoints.podStore, ns, 1, 1, 0, ipv4only)
-	endpoints.syncService(context.TODO(), ns+"/foo")
+
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -1429,7 +1533,9 @@ func TestLastTriggerChangeTimeAnnotation(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1449,7 +1555,10 @@ func TestLastTriggerChangeTimeAnnotation(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -1476,7 +1585,9 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationOverridden(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1499,7 +1610,10 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationOverridden(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -1526,7 +1640,9 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationCleared(t *testing.T) {
 	ns := "other"
 	testServer, endpointsHandler := makeTestServer(t, ns)
 	defer testServer.Close()
-	endpoints := newController(testServer.URL, 0*time.Second)
+
+	tCtx := ktesting.Init(t)
+	endpoints := newController(tCtx, testServer.URL, 0*time.Second)
 	endpoints.endpointsStore.Add(&v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -1550,7 +1666,10 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationCleared(t *testing.T) {
 			Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(context.TODO(), ns+"/foo")
+	err := endpoints.syncService(tCtx, ns+"/foo")
+	if err != nil {
+		t.Errorf("Unexpected error syncing service %v", err)
+	}
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -1671,15 +1790,15 @@ func TestPodUpdatesBatching(t *testing.T) {
 			resourceVersion := 1
 			testServer, endpointsHandler := makeTestServer(t, ns)
 			defer testServer.Close()
-			endpoints := newController(testServer.URL, tc.batchPeriod)
-			stopCh := make(chan struct{})
-			defer close(stopCh)
+
+			tCtx := ktesting.Init(t)
+			endpoints := newController(tCtx, testServer.URL, tc.batchPeriod)
 			endpoints.podsSynced = alwaysReady
 			endpoints.servicesSynced = alwaysReady
 			endpoints.endpointsSynced = alwaysReady
|
||||||
endpoints.workerLoopPeriod = 10 * time.Millisecond
|
endpoints.workerLoopPeriod = 10 * time.Millisecond
|
||||||
|
|
||||||
go endpoints.Run(context.TODO(), 1)
|
go endpoints.Run(tCtx, 1)
|
||||||
|
|
||||||
addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only)
|
addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only)
|
||||||
|
|
||||||
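For the Run-based batching tests the same context also replaces the hand-made stop channel: there is no stopCh to allocate and close, because Run returns once the test context is cancelled at the end of the test. In outline, with the helpers of this file:

	tCtx := ktesting.Init(t)                       // stands in for stopCh := make(chan struct{}) / defer close(stopCh)
	endpoints := newController(tCtx, testServer.URL, tc.batchPeriod)
	endpoints.podsSynced = alwaysReady
	go endpoints.Run(tCtx, 1)                      // exits when tCtx is cancelled at test teardown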
@@ -1794,15 +1913,15 @@ func TestPodAddsBatching(t *testing.T) {
|
|||||||
ns := "other"
|
ns := "other"
|
||||||
testServer, endpointsHandler := makeTestServer(t, ns)
|
testServer, endpointsHandler := makeTestServer(t, ns)
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
endpoints := newController(testServer.URL, tc.batchPeriod)
|
|
||||||
stopCh := make(chan struct{})
|
tCtx := ktesting.Init(t)
|
||||||
defer close(stopCh)
|
endpoints := newController(tCtx, testServer.URL, tc.batchPeriod)
|
||||||
endpoints.podsSynced = alwaysReady
|
endpoints.podsSynced = alwaysReady
|
||||||
endpoints.servicesSynced = alwaysReady
|
endpoints.servicesSynced = alwaysReady
|
||||||
endpoints.endpointsSynced = alwaysReady
|
endpoints.endpointsSynced = alwaysReady
|
||||||
endpoints.workerLoopPeriod = 10 * time.Millisecond
|
endpoints.workerLoopPeriod = 10 * time.Millisecond
|
||||||
|
|
||||||
go endpoints.Run(context.TODO(), 1)
|
go endpoints.Run(tCtx, 1)
|
||||||
|
|
||||||
endpoints.serviceStore.Add(&v1.Service{
|
endpoints.serviceStore.Add(&v1.Service{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
|
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
|
||||||
@@ -1916,15 +2035,15 @@ func TestPodDeleteBatching(t *testing.T) {
|
|||||||
ns := "other"
|
ns := "other"
|
||||||
testServer, endpointsHandler := makeTestServer(t, ns)
|
testServer, endpointsHandler := makeTestServer(t, ns)
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
endpoints := newController(testServer.URL, tc.batchPeriod)
|
|
||||||
stopCh := make(chan struct{})
|
tCtx := ktesting.Init(t)
|
||||||
defer close(stopCh)
|
endpoints := newController(tCtx, testServer.URL, tc.batchPeriod)
|
||||||
endpoints.podsSynced = alwaysReady
|
endpoints.podsSynced = alwaysReady
|
||||||
endpoints.servicesSynced = alwaysReady
|
endpoints.servicesSynced = alwaysReady
|
||||||
endpoints.endpointsSynced = alwaysReady
|
endpoints.endpointsSynced = alwaysReady
|
||||||
endpoints.workerLoopPeriod = 10 * time.Millisecond
|
endpoints.workerLoopPeriod = 10 * time.Millisecond
|
||||||
|
|
||||||
go endpoints.Run(context.TODO(), 1)
|
go endpoints.Run(tCtx, 1)
|
||||||
|
|
||||||
addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only)
|
addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only)
|
||||||
|
|
||||||
@@ -1960,7 +2079,9 @@ func TestSyncEndpointsServiceNotFound(t *testing.T) {
|
|||||||
ns := metav1.NamespaceDefault
|
ns := metav1.NamespaceDefault
|
||||||
testServer, endpointsHandler := makeTestServer(t, ns)
|
testServer, endpointsHandler := makeTestServer(t, ns)
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
endpoints := newController(testServer.URL, 0)
|
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
endpoints := newController(tCtx, testServer.URL, 0)
|
||||||
endpoints.endpointsStore.Add(&v1.Endpoints{
|
endpoints.endpointsStore.Add(&v1.Endpoints{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "foo",
|
Name: "foo",
|
||||||
@@ -1968,7 +2089,10 @@ func TestSyncEndpointsServiceNotFound(t *testing.T) {
|
|||||||
ResourceVersion: "1",
|
ResourceVersion: "1",
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
endpoints.syncService(context.TODO(), ns+"/foo")
|
err := endpoints.syncService(tCtx, ns+"/foo")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Unexpected error syncing service %v", err)
|
||||||
|
}
|
||||||
endpointsHandler.ValidateRequestCount(t, 1)
|
endpointsHandler.ValidateRequestCount(t, 1)
|
||||||
endpointsHandler.ValidateRequest(t, "/api/v1/namespaces/"+ns+"/endpoints/foo", "DELETE", nil)
|
endpointsHandler.ValidateRequest(t, "/api/v1/namespaces/"+ns+"/endpoints/foo", "DELETE", nil)
|
||||||
}
|
}
|
||||||
@@ -2058,8 +2182,9 @@ func TestSyncServiceOverCapacity(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
ns := "test"
|
ns := "test"
|
||||||
client, c := newFakeController(0 * time.Second)
|
client, c := newFakeController(tCtx, 0*time.Second)
|
||||||
|
|
||||||
addPods(c.podStore, ns, tc.numDesired, 1, tc.numDesiredNotReady, ipv4only)
|
addPods(c.podStore, ns, tc.numDesired, 1, tc.numDesiredNotReady, ipv4only)
|
||||||
pods := c.podStore.List()
|
pods := c.podStore.List()
|
||||||
@@ -2092,11 +2217,17 @@ func TestSyncServiceOverCapacity(t *testing.T) {
|
|||||||
endpoints.Annotations[v1.EndpointsOverCapacity] = *tc.startingAnnotation
|
endpoints.Annotations[v1.EndpointsOverCapacity] = *tc.startingAnnotation
|
||||||
}
|
}
|
||||||
c.endpointsStore.Add(endpoints)
|
c.endpointsStore.Add(endpoints)
|
||||||
client.CoreV1().Endpoints(ns).Create(context.TODO(), endpoints, metav1.CreateOptions{})
|
_, err := client.CoreV1().Endpoints(ns).Create(tCtx, endpoints, metav1.CreateOptions{})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error creating endpoints: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
c.syncService(context.TODO(), fmt.Sprintf("%s/%s", ns, svc.Name))
|
err = c.syncService(tCtx, fmt.Sprintf("%s/%s", ns, svc.Name))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Unexpected error syncing service %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
actualEndpoints, err := client.CoreV1().Endpoints(ns).Get(context.TODO(), endpoints.Name, metav1.GetOptions{})
|
actualEndpoints, err := client.CoreV1().Endpoints(ns).Get(tCtx, endpoints.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unexpected error getting endpoints: %v", err)
|
t.Fatalf("unexpected error getting endpoints: %v", err)
|
||||||
}
|
}
|
||||||
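The over-capacity test above also shows how the previously ignored fake-client and sync calls are asserted now that their results are checked: a failure while preparing fixtures aborts the test with t.Fatalf, while a failure in the code under test is reported with t.Errorf. Condensed, with the variables used in that test:

	if _, err := client.CoreV1().Endpoints(ns).Create(tCtx, endpoints, metav1.CreateOptions{}); err != nil {
		t.Fatalf("unexpected error creating endpoints: %v", err) // fixture setup: stop immediately
	}
	if err := c.syncService(tCtx, fmt.Sprintf("%s/%s", ns, svc.Name)); err != nil {
		t.Errorf("Unexpected error syncing service %v", err) // behaviour under test: record and continue
	}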
@@ -2250,10 +2381,11 @@ func TestMultipleServiceChanges(t *testing.T) {
|
|||||||
testServer := makeBlockingEndpointDeleteTestServer(t, controller, endpoint, blockDelete, blockNextAction, ns)
|
testServer := makeBlockingEndpointDeleteTestServer(t, controller, endpoint, blockDelete, blockNextAction, ns)
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
|
|
||||||
*controller = *newController(testServer.URL, 0*time.Second)
|
tCtx := ktesting.Init(t)
|
||||||
|
*controller = *newController(tCtx, testServer.URL, 0*time.Second)
|
||||||
addPods(controller.podStore, ns, 1, 1, 0, ipv4only)
|
addPods(controller.podStore, ns, 1, 1, 0, ipv4only)
|
||||||
|
|
||||||
go func() { controller.Run(context.TODO(), 1) }()
|
go func() { controller.Run(tCtx, 1) }()
|
||||||
|
|
||||||
svc := &v1.Service{
|
svc := &v1.Service{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
|
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
|
||||||
@@ -2481,8 +2613,10 @@ func TestSyncServiceAddresses(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
|
||||||
ns := tc.service.Namespace
|
ns := tc.service.Namespace
|
||||||
client, c := newFakeController(0 * time.Second)
|
client, c := newFakeController(tCtx, 0*time.Second)
|
||||||
|
|
||||||
err := c.podStore.Add(tc.pod)
|
err := c.podStore.Add(tc.pod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -2492,12 +2626,12 @@ func TestSyncServiceAddresses(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error adding service %v", err)
|
t.Errorf("Unexpected error adding service %v", err)
|
||||||
}
|
}
|
||||||
err = c.syncService(context.TODO(), fmt.Sprintf("%s/%s", ns, tc.service.Name))
|
err = c.syncService(tCtx, fmt.Sprintf("%s/%s", ns, tc.service.Name))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error syncing service %v", err)
|
t.Errorf("Unexpected error syncing service %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
endpoints, err := client.CoreV1().Endpoints(ns).Get(context.TODO(), tc.service.Name, metav1.GetOptions{})
|
endpoints, err := client.CoreV1().Endpoints(ns).Get(tCtx, tc.service.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Unexpected error %v", err)
|
t.Errorf("Unexpected error %v", err)
|
||||||
}
|
}
|
||||||
@@ -2524,7 +2658,9 @@ func TestEndpointsDeletionEvents(t *testing.T) {
|
|||||||
ns := metav1.NamespaceDefault
|
ns := metav1.NamespaceDefault
|
||||||
testServer, _ := makeTestServer(t, ns)
|
testServer, _ := makeTestServer(t, ns)
|
||||||
defer testServer.Close()
|
defer testServer.Close()
|
||||||
controller := newController(testServer.URL, 0)
|
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
controller := newController(tCtx, testServer.URL, 0)
|
||||||
store := controller.endpointsStore
|
store := controller.endpointsStore
|
||||||
ep1 := &v1.Endpoints{
|
ep1 := &v1.Endpoints{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
@@ -86,7 +86,7 @@ func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
|
|||||||
client clientset.Interface,
|
client clientset.Interface,
|
||||||
endpointUpdatesBatchPeriod time.Duration,
|
endpointUpdatesBatchPeriod time.Duration,
|
||||||
) *Controller {
|
) *Controller {
|
||||||
broadcaster := record.NewBroadcaster()
|
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
|
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
|
||||||
|
|
||||||
endpointslicemetrics.RegisterMetrics()
|
endpointslicemetrics.RegisterMetrics()
|
||||||
|
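record.NewBroadcaster(record.WithContext(ctx)) is the constructor-side half of the verbosity change: the broadcaster takes its logger from the context rather than from global klog state, so the -v value given to kube-controller-manager governs how chatty event logging is. The same two lines recur in every controller touched by this commit; sketched for a hypothetical controller:

	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"}) // component name is illustrative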
@@ -76,7 +76,7 @@ func NewController(ctx context.Context, endpointsInformer coreinformers.Endpoint
|
|||||||
endpointUpdatesBatchPeriod time.Duration,
|
endpointUpdatesBatchPeriod time.Duration,
|
||||||
) *Controller {
|
) *Controller {
|
||||||
logger := klog.FromContext(ctx)
|
logger := klog.FromContext(ctx)
|
||||||
broadcaster := record.NewBroadcaster()
|
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-mirroring-controller"})
|
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-mirroring-controller"})
|
||||||
|
|
||||||
metrics.RegisterMetrics()
|
metrics.RegisterMetrics()
|
||||||
|
@@ -29,9 +29,9 @@ import (
|
|||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
"k8s.io/component-base/metrics/testutil"
|
"k8s.io/component-base/metrics/testutil"
|
||||||
endpointsliceutil "k8s.io/endpointslice/util"
|
endpointsliceutil "k8s.io/endpointslice/util"
|
||||||
"k8s.io/klog/v2/ktesting"
|
|
||||||
endpointsv1 "k8s.io/kubernetes/pkg/api/v1/endpoints"
|
endpointsv1 "k8s.io/kubernetes/pkg/api/v1/endpoints"
|
||||||
"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
|
"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
"k8s.io/utils/pointer"
|
"k8s.io/utils/pointer"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1007,6 +1007,7 @@ func TestReconcile(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.testName, func(t *testing.T) {
|
t.Run(tc.testName, func(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
client := newClientset()
|
client := newClientset()
|
||||||
setupMetrics()
|
setupMetrics()
|
||||||
namespace := "test"
|
namespace := "test"
|
||||||
@@ -1037,7 +1038,7 @@ func TestReconcile(t *testing.T) {
|
|||||||
if maxEndpointsPerSubset == 0 {
|
if maxEndpointsPerSubset == 0 {
|
||||||
maxEndpointsPerSubset = defaultMaxEndpointsPerSubset
|
maxEndpointsPerSubset = defaultMaxEndpointsPerSubset
|
||||||
}
|
}
|
||||||
r := newReconciler(client, maxEndpointsPerSubset)
|
r := newReconciler(tCtx, client, maxEndpointsPerSubset)
|
||||||
reconcileHelper(t, r, &endpoints, tc.existingEndpointSlices)
|
reconcileHelper(t, r, &endpoints, tc.existingEndpointSlices)
|
||||||
|
|
||||||
numExtraActions := len(client.Actions()) - numInitialActions
|
numExtraActions := len(client.Actions()) - numInitialActions
|
||||||
@@ -1057,8 +1058,8 @@ func TestReconcile(t *testing.T) {
|
|||||||
|
|
||||||
// Test Helpers
|
// Test Helpers
|
||||||
|
|
||||||
func newReconciler(client *fake.Clientset, maxEndpointsPerSubset int32) *reconciler {
|
func newReconciler(ctx context.Context, client *fake.Clientset, maxEndpointsPerSubset int32) *reconciler {
|
||||||
broadcaster := record.NewBroadcaster()
|
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "endpoint-slice-mirroring-controller"})
|
recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "endpoint-slice-mirroring-controller"})
|
||||||
|
|
||||||
return &reconciler{
|
return &reconciler{
|
||||||
|
@@ -85,6 +85,7 @@ var _ controller.Debuggable = (*GarbageCollector)(nil)
|
|||||||
|
|
||||||
// NewGarbageCollector creates a new GarbageCollector.
|
// NewGarbageCollector creates a new GarbageCollector.
|
||||||
func NewGarbageCollector(
|
func NewGarbageCollector(
|
||||||
|
ctx context.Context,
|
||||||
kubeClient clientset.Interface,
|
kubeClient clientset.Interface,
|
||||||
metadataClient metadata.Interface,
|
metadataClient metadata.Interface,
|
||||||
mapper meta.ResettableRESTMapper,
|
mapper meta.ResettableRESTMapper,
|
||||||
@@ -93,7 +94,7 @@ func NewGarbageCollector(
|
|||||||
informersStarted <-chan struct{},
|
informersStarted <-chan struct{},
|
||||||
) (*GarbageCollector, error) {
|
) (*GarbageCollector, error) {
|
||||||
|
|
||||||
eventBroadcaster := record.NewBroadcaster()
|
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "garbage-collector-controller"})
|
eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "garbage-collector-controller"})
|
||||||
|
|
||||||
attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete")
|
attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete")
|
||||||
@@ -147,7 +148,7 @@ func (gc *GarbageCollector) Run(ctx context.Context, workers int) {
|
|||||||
defer gc.dependencyGraphBuilder.graphChanges.ShutDown()
|
defer gc.dependencyGraphBuilder.graphChanges.ShutDown()
|
||||||
|
|
||||||
// Start events processing pipeline.
|
// Start events processing pipeline.
|
||||||
gc.eventBroadcaster.StartStructuredLogging(0)
|
gc.eventBroadcaster.StartStructuredLogging(3)
|
||||||
gc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gc.kubeClient.CoreV1().Events("")})
|
gc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gc.kubeClient.CoreV1().Events("")})
|
||||||
defer gc.eventBroadcaster.Shutdown()
|
defer gc.eventBroadcaster.Shutdown()
|
||||||
|
|
||||||
|
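StartStructuredLogging takes the verbosity at which received events are written to the log. Raising it from 0 to 3 pushes the event stream behind -v=3, so a controller-manager running at the default verbosity no longer prints one line per event, while the events still reach the API-server sink. The resulting pipeline, sketched with an assumed clientset named client in scope:

	broadcaster := record.NewBroadcaster(record.WithContext(ctx)) // logger (and therefore -v) comes from ctx
	broadcaster.StartStructuredLogging(3)                         // event log lines only at verbosity >= 3
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
	defer broadcaster.Shutdown()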
@@ -30,7 +30,6 @@ import (
|
|||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
|
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
"k8s.io/klog/v2/ktesting"
|
|
||||||
|
|
||||||
"github.com/golang/groupcache/lru"
|
"github.com/golang/groupcache/lru"
|
||||||
"github.com/google/go-cmp/cmp"
|
"github.com/google/go-cmp/cmp"
|
||||||
@@ -64,6 +63,7 @@ import (
|
|||||||
"k8s.io/controller-manager/pkg/informerfactory"
|
"k8s.io/controller-manager/pkg/informerfactory"
|
||||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||||
c "k8s.io/kubernetes/pkg/controller"
|
c "k8s.io/kubernetes/pkg/controller"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
)
|
)
|
||||||
|
|
||||||
type testRESTMapper struct {
|
type testRESTMapper struct {
|
||||||
@@ -98,15 +98,14 @@ func TestGarbageCollectorConstruction(t *testing.T) {
|
|||||||
// construction will not fail.
|
// construction will not fail.
|
||||||
alwaysStarted := make(chan struct{})
|
alwaysStarted := make(chan struct{})
|
||||||
close(alwaysStarted)
|
close(alwaysStarted)
|
||||||
gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{},
|
logger, tCtx := ktesting.NewTestContext(t)
|
||||||
|
gc, err := NewGarbageCollector(tCtx, client, metadataClient, rm, map[schema.GroupResource]struct{}{},
|
||||||
informerfactory.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
|
informerfactory.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
assert.Equal(t, 0, len(gc.dependencyGraphBuilder.monitors))
|
assert.Equal(t, 0, len(gc.dependencyGraphBuilder.monitors))
|
||||||
|
|
||||||
logger, _ := ktesting.NewTestContext(t)
|
|
||||||
|
|
||||||
// Make sure resource monitor syncing creates and stops resource monitors.
|
// Make sure resource monitor syncing creates and stops resource monitors.
|
||||||
tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
|
tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
|
||||||
err = gc.resyncMonitors(logger, twoResources)
|
err = gc.resyncMonitors(logger, twoResources)
|
||||||
@@ -121,10 +120,7 @@ func TestGarbageCollectorConstruction(t *testing.T) {
|
|||||||
}
|
}
|
||||||
assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
|
assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
|
||||||
|
|
||||||
// Make sure the syncing mechanism also works after Run() has been called
|
go gc.Run(tCtx, 1)
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
go gc.Run(ctx, 1)
|
|
||||||
|
|
||||||
err = gc.resyncMonitors(logger, twoResources)
|
err = gc.resyncMonitors(logger, twoResources)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -212,6 +208,7 @@ type garbageCollector struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
|
func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
|
||||||
|
_, ctx := ktesting.NewTestContext(t)
|
||||||
metadataClient, err := metadata.NewForConfig(config)
|
metadataClient, err := metadata.NewForConfig(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -221,7 +218,7 @@ func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
|
|||||||
sharedInformers := informers.NewSharedInformerFactory(client, 0)
|
sharedInformers := informers.NewSharedInformerFactory(client, 0)
|
||||||
alwaysStarted := make(chan struct{})
|
alwaysStarted := make(chan struct{})
|
||||||
close(alwaysStarted)
|
close(alwaysStarted)
|
||||||
gc, err := NewGarbageCollector(client, metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, ignoredResources, sharedInformers, alwaysStarted)
|
gc, err := NewGarbageCollector(ctx, client, metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, ignoredResources, sharedInformers, alwaysStarted)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -886,17 +883,17 @@ func TestGarbageCollectorSync(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
sharedInformers := informers.NewSharedInformerFactory(client, 0)
|
sharedInformers := informers.NewSharedInformerFactory(client, 0)
|
||||||
|
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
defer tCtx.Cancel("test has completed")
|
||||||
alwaysStarted := make(chan struct{})
|
alwaysStarted := make(chan struct{})
|
||||||
close(alwaysStarted)
|
close(alwaysStarted)
|
||||||
gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
|
gc, err := NewGarbageCollector(tCtx, client, metadataClient, rm, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, ctx := ktesting.NewTestContext(t)
|
go gc.Run(tCtx, 1)
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
go gc.Run(ctx, 1)
|
|
||||||
// The pseudo-code of GarbageCollector.Sync():
|
// The pseudo-code of GarbageCollector.Sync():
|
||||||
// GarbageCollector.Sync(client, period, stopCh):
|
// GarbageCollector.Sync(client, period, stopCh):
|
||||||
// wait.Until() loops with `period` until the `stopCh` is closed :
|
// wait.Until() loops with `period` until the `stopCh` is closed :
|
||||||
@@ -911,7 +908,7 @@ func TestGarbageCollectorSync(t *testing.T) {
|
|||||||
// The 1s sleep in the test allows GetDeletableResources and
|
// The 1s sleep in the test allows GetDeletableResources and
|
||||||
// gc.resyncMonitors to run ~5 times to ensure the changes to the
|
// gc.resyncMonitors to run ~5 times to ensure the changes to the
|
||||||
// fakeDiscoveryClient are picked up.
|
// fakeDiscoveryClient are picked up.
|
||||||
go gc.Sync(ctx, fakeDiscoveryClient, 200*time.Millisecond)
|
go gc.Sync(tCtx, fakeDiscoveryClient, 200*time.Millisecond)
|
||||||
|
|
||||||
// Wait until the sync discovers the initial resources
|
// Wait until the sync discovers the initial resources
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
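TestGarbageCollectorSync also demonstrates the explicit side of ktesting cancellation: Cancel takes a reason that is recorded when the context shuts down, and the same tCtx feeds Run and Sync, replacing the separate context.WithCancel. With the variables used in the test above:

	tCtx := ktesting.Init(t)
	defer tCtx.Cancel("test has completed") // cancellation from ktesting, with a logged reason

	gc, err := NewGarbageCollector(tCtx, client, metadataClient, rm,
		map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	go gc.Run(tCtx, 1)
	go gc.Sync(tCtx, fakeDiscoveryClient, 200*time.Millisecond)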
@@ -148,7 +148,7 @@ func NewController(ctx context.Context, podInformer coreinformers.PodInformer, j
|
|||||||
}
|
}
|
||||||
|
|
||||||
func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface, clock clock.WithTicker) (*Controller, error) {
|
func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface, clock clock.WithTicker) (*Controller, error) {
|
||||||
eventBroadcaster := record.NewBroadcaster()
|
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
logger := klog.FromContext(ctx)
|
logger := klog.FromContext(ctx)
|
||||||
|
|
||||||
jm := &Controller{
|
jm := &Controller{
|
||||||
@@ -214,7 +214,7 @@ func (jm *Controller) Run(ctx context.Context, workers int) {
|
|||||||
logger := klog.FromContext(ctx)
|
logger := klog.FromContext(ctx)
|
||||||
|
|
||||||
// Start events processing pipeline.
|
// Start events processing pipeline.
|
||||||
jm.broadcaster.StartStructuredLogging(0)
|
jm.broadcaster.StartStructuredLogging(3)
|
||||||
jm.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: jm.kubeClient.CoreV1().Events("")})
|
jm.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: jm.kubeClient.CoreV1().Events("")})
|
||||||
defer jm.broadcaster.Shutdown()
|
defer jm.broadcaster.Shutdown()
|
||||||
|
|
||||||
|
@@ -48,8 +48,8 @@ type adapter struct {
|
|||||||
recorder record.EventRecorder
|
recorder record.EventRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
func newAdapter(k8s clientset.Interface, cloud *gce.Cloud) *adapter {
|
func newAdapter(ctx context.Context, k8s clientset.Interface, cloud *gce.Cloud) *adapter {
|
||||||
broadcaster := record.NewBroadcaster()
|
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
|
|
||||||
ret := &adapter{
|
ret := &adapter{
|
||||||
k8s: k8s,
|
k8s: k8s,
|
||||||
@@ -65,7 +65,7 @@ func (a *adapter) Run(ctx context.Context) {
|
|||||||
defer utilruntime.HandleCrash()
|
defer utilruntime.HandleCrash()
|
||||||
|
|
||||||
// Start event processing pipeline.
|
// Start event processing pipeline.
|
||||||
a.broadcaster.StartStructuredLogging(0)
|
a.broadcaster.StartStructuredLogging(3)
|
||||||
a.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: a.k8s.CoreV1().Events("")})
|
a.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: a.k8s.CoreV1().Events("")})
|
||||||
defer a.broadcaster.Shutdown()
|
defer a.broadcaster.Shutdown()
|
||||||
|
|
||||||
|
@@ -124,9 +124,9 @@ func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovide
|
|||||||
|
|
||||||
switch allocatorType {
|
switch allocatorType {
|
||||||
case RangeAllocatorType:
|
case RangeAllocatorType:
|
||||||
return NewCIDRRangeAllocator(logger, kubeClient, nodeInformer, allocatorParams, nodeList)
|
return NewCIDRRangeAllocator(ctx, kubeClient, nodeInformer, allocatorParams, nodeList)
|
||||||
case CloudAllocatorType:
|
case CloudAllocatorType:
|
||||||
return NewCloudCIDRAllocator(logger, kubeClient, cloud, nodeInformer)
|
return NewCloudCIDRAllocator(ctx, kubeClient, cloud, nodeInformer)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
|
return nil, fmt.Errorf("invalid CIDR allocator type: %v", allocatorType)
|
||||||
}
|
}
|
||||||
|
@@ -87,13 +87,14 @@ type cloudCIDRAllocator struct {
|
|||||||
var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
|
var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
|
||||||
|
|
||||||
// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
|
// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
|
||||||
func NewCloudCIDRAllocator(logger klog.Logger, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
|
func NewCloudCIDRAllocator(ctx context.Context, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
|
||||||
|
logger := klog.FromContext(ctx)
|
||||||
if client == nil {
|
if client == nil {
|
||||||
logger.Error(nil, "kubeClient is nil when starting cloud CIDR allocator")
|
logger.Error(nil, "kubeClient is nil when starting cloud CIDR allocator")
|
||||||
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
|
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
eventBroadcaster := record.NewBroadcaster()
|
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
|
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
|
||||||
|
|
||||||
gceCloud, ok := cloud.(*gce.Cloud)
|
gceCloud, ok := cloud.(*gce.Cloud)
|
||||||
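The allocator constructors all follow the same conversion recipe: accept a context.Context instead of a klog.Logger, recover the logger with klog.FromContext, and hand the context to the event broadcaster. A compact sketch (NewExampleAllocator and exampleAllocator are hypothetical names standing in for the concrete allocators):

	func NewExampleAllocator(ctx context.Context, client clientset.Interface) (CIDRAllocator, error) {
		logger := klog.FromContext(ctx) // replaces the explicit logger parameter
		if client == nil {
			logger.Error(nil, "kubeClient is nil when starting the example allocator")
			klog.FlushAndExit(klog.ExitFlushTimeout, 1)
		}
		eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
		recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
		return &exampleAllocator{client: client, broadcaster: eventBroadcaster, recorder: recorder}, nil
	}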
@@ -143,7 +144,7 @@ func (ca *cloudCIDRAllocator) Run(ctx context.Context) {
|
|||||||
defer utilruntime.HandleCrash()
|
defer utilruntime.HandleCrash()
|
||||||
|
|
||||||
// Start event processing pipeline.
|
// Start event processing pipeline.
|
||||||
ca.broadcaster.StartStructuredLogging(0)
|
ca.broadcaster.StartStructuredLogging(3)
|
||||||
logger := klog.FromContext(ctx)
|
logger := klog.FromContext(ctx)
|
||||||
logger.Info("Sending events to api server")
|
logger.Info("Sending events to api server")
|
||||||
ca.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ca.client.CoreV1().Events("")})
|
ca.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ca.client.CoreV1().Events("")})
|
||||||
|
@@ -20,15 +20,15 @@ limitations under the License.
|
|||||||
package ipam
|
package ipam
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
|
||||||
informers "k8s.io/client-go/informers/core/v1"
|
informers "k8s.io/client-go/informers/core/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
cloudprovider "k8s.io/cloud-provider"
|
cloudprovider "k8s.io/cloud-provider"
|
||||||
"k8s.io/klog/v2"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
|
// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
|
||||||
func NewCloudCIDRAllocator(logger klog.Logger, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
|
func NewCloudCIDRAllocator(ctx context.Context, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
|
||||||
return nil, errors.New("legacy cloud provider support not built")
|
return nil, errors.New("legacy cloud provider support not built")
|
||||||
}
|
}
|
||||||
|
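Because the cloud allocator also ships as a providerless stub, the signature change has to be mirrored in that build variant or the stubbed build stops compiling: the stub accepts the new ctx parameter even though it only returns its error. Roughly, assuming a providerless build tag on the stub file (the exact tags are not shown in this diff):

	//go:build providerless
	// stub variant compiled when legacy cloud provider support is excluded

	func NewCloudCIDRAllocator(ctx context.Context, client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
		return nil, errors.New("legacy cloud provider support not built")
	}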
@@ -67,6 +67,7 @@ type Controller struct {
|
|||||||
|
|
||||||
// NewController returns a new instance of the IPAM controller.
|
// NewController returns a new instance of the IPAM controller.
|
||||||
func NewController(
|
func NewController(
|
||||||
|
ctx context.Context,
|
||||||
config *Config,
|
config *Config,
|
||||||
kubeClient clientset.Interface,
|
kubeClient clientset.Interface,
|
||||||
cloud cloudprovider.Interface,
|
cloud cloudprovider.Interface,
|
||||||
@@ -89,7 +90,7 @@ func NewController(
|
|||||||
|
|
||||||
c := &Controller{
|
c := &Controller{
|
||||||
config: config,
|
config: config,
|
||||||
adapter: newAdapter(kubeClient, gceCloud),
|
adapter: newAdapter(ctx, kubeClient, gceCloud),
|
||||||
syncers: make(map[string]*nodesync.NodeSync),
|
syncers: make(map[string]*nodesync.NodeSync),
|
||||||
set: set,
|
set: set,
|
||||||
}
|
}
|
||||||
|
@@ -67,13 +67,14 @@ type rangeAllocator struct {
|
|||||||
// Caller must always pass in a list of existing nodes so the new allocator.
|
// Caller must always pass in a list of existing nodes so the new allocator.
|
||||||
// Caller must ensure that ClusterCIDRs are semantically correct e.g (1 for non DualStack, 2 for DualStack etc..)
|
// Caller must ensure that ClusterCIDRs are semantically correct e.g (1 for non DualStack, 2 for DualStack etc..)
|
||||||
// can initialize its CIDR map. NodeList is only nil in testing.
|
// can initialize its CIDR map. NodeList is only nil in testing.
|
||||||
func NewCIDRRangeAllocator(logger klog.Logger, client clientset.Interface, nodeInformer informers.NodeInformer, allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList) (CIDRAllocator, error) {
|
func NewCIDRRangeAllocator(ctx context.Context, client clientset.Interface, nodeInformer informers.NodeInformer, allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList) (CIDRAllocator, error) {
|
||||||
|
logger := klog.FromContext(ctx)
|
||||||
if client == nil {
|
if client == nil {
|
||||||
logger.Error(nil, "kubeClient is nil when starting CIDRRangeAllocator")
|
logger.Error(nil, "kubeClient is nil when starting CIDRRangeAllocator")
|
||||||
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
|
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
eventBroadcaster := record.NewBroadcaster()
|
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
|
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
|
||||||
|
|
||||||
// create a cidrSet for each cidr we operate on
|
// create a cidrSet for each cidr we operate on
|
||||||
@@ -169,7 +170,7 @@ func (r *rangeAllocator) Run(ctx context.Context) {
|
|||||||
defer utilruntime.HandleCrash()
|
defer utilruntime.HandleCrash()
|
||||||
|
|
||||||
// Start event processing pipeline.
|
// Start event processing pipeline.
|
||||||
r.broadcaster.StartStructuredLogging(0)
|
r.broadcaster.StartStructuredLogging(3)
|
||||||
logger := klog.FromContext(ctx)
|
logger := klog.FromContext(ctx)
|
||||||
logger.Info("Sending events to api server")
|
logger.Info("Sending events to api server")
|
||||||
r.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: r.client.CoreV1().Events("")})
|
r.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: r.client.CoreV1().Events("")})
|
||||||
|
@@ -17,7 +17,6 @@ limitations under the License.
|
|||||||
package ipam
|
package ipam
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -26,9 +25,9 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
"k8s.io/klog/v2/ktesting"
|
|
||||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
|
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
|
||||||
"k8s.io/kubernetes/pkg/controller/testutil"
|
"k8s.io/kubernetes/pkg/controller/testutil"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
netutils "k8s.io/utils/net"
|
netutils "k8s.io/utils/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -275,13 +274,13 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// test function
|
// test function
|
||||||
logger, _ := ktesting.NewTestContext(t)
|
tCtx := ktesting.Init(t)
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.description, func(t *testing.T) {
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
|
fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
|
||||||
nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
|
nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{})
|
||||||
_, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
_, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
||||||
if err == nil && tc.ctrlCreateFail {
|
if err == nil && tc.ctrlCreateFail {
|
||||||
t.Fatalf("creating range allocator was expected to fail, but it did not")
|
t.Fatalf("creating range allocator was expected to fail, but it did not")
|
||||||
}
|
}
|
||||||
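In these allocator tests the value returned by ktesting doubles as an ordinary request context: the same tCtx is passed to the fake node handler's List call and to NewCIDRRangeAllocator, so no context.TODO() remains in the test body. With the names used above:

	tCtx := ktesting.Init(t)
	fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
	nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{}) // TContext satisfies context.Context
	_, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
	if err == nil && tc.ctrlCreateFail {
		t.Fatalf("creating range allocator was expected to fail, but it did not")
	}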
@@ -510,12 +509,12 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// test function
|
// test function
|
||||||
logger, ctx := ktesting.NewTestContext(t)
|
logger, tCtx := ktesting.NewTestContext(t)
|
||||||
testFunc := func(tc testCase) {
|
testFunc := func(tc testCase) {
|
||||||
fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
|
fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
|
||||||
nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
|
nodeList, _ := tc.fakeNodeHandler.List(tCtx, metav1.ListOptions{})
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
allocator, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
|
t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
|
||||||
return
|
return
|
||||||
@@ -527,7 +526,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
|
|||||||
}
|
}
|
||||||
rangeAllocator.nodesSynced = test.AlwaysReady
|
rangeAllocator.nodesSynced = test.AlwaysReady
|
||||||
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
||||||
go allocator.Run(ctx)
|
go allocator.Run(tCtx)
|
||||||
|
|
||||||
// this is a bit of white box testing
|
// this is a bit of white box testing
|
||||||
// pre allocate the cidrs as per the test
|
// pre allocate the cidrs as per the test
|
||||||
@@ -611,10 +610,10 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
logger, ctx := ktesting.NewTestContext(t)
|
logger, tCtx := ktesting.NewTestContext(t)
|
||||||
testFunc := func(tc testCase) {
|
testFunc := func(tc testCase) {
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
allocator, err := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
|
t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
|
||||||
}
|
}
|
||||||
@@ -625,7 +624,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
|
|||||||
}
|
}
|
||||||
rangeAllocator.nodesSynced = test.AlwaysReady
|
rangeAllocator.nodesSynced = test.AlwaysReady
|
||||||
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
||||||
go allocator.Run(ctx)
|
go allocator.Run(tCtx)
|
||||||
|
|
||||||
// this is a bit of white box testing
|
// this is a bit of white box testing
|
||||||
for setIdx, allocatedList := range tc.allocatedCIDRs {
|
for setIdx, allocatedList := range tc.allocatedCIDRs {
|
||||||
@@ -756,10 +755,10 @@ func TestReleaseCIDRSuccess(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
logger, ctx := ktesting.NewTestContext(t)
|
logger, tCtx := ktesting.NewTestContext(t)
|
||||||
testFunc := func(tc releaseTestCase) {
|
testFunc := func(tc releaseTestCase) {
|
||||||
// Initialize the range allocator.
|
// Initialize the range allocator.
|
||||||
allocator, _ := NewCIDRRangeAllocator(logger, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
allocator, _ := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
|
||||||
rangeAllocator, ok := allocator.(*rangeAllocator)
|
rangeAllocator, ok := allocator.(*rangeAllocator)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
|
||||||
@@ -767,7 +766,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
|
|||||||
}
|
}
|
||||||
rangeAllocator.nodesSynced = test.AlwaysReady
|
rangeAllocator.nodesSynced = test.AlwaysReady
|
||||||
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
rangeAllocator.recorder = testutil.NewFakeRecorder()
|
||||||
go allocator.Run(ctx)
|
go allocator.Run(tCtx)
|
||||||
|
|
||||||
// this is a bit of white box testing
|
// this is a bit of white box testing
|
||||||
for setIdx, allocatedList := range tc.allocatedCIDRs {
|
for setIdx, allocatedList := range tc.allocatedCIDRs {
|
||||||
|
@@ -20,6 +20,7 @@ limitations under the License.
|
|||||||
package nodeipam
|
package nodeipam
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
@@ -33,7 +34,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func createLegacyIPAM(
|
func createLegacyIPAM(
|
||||||
logger klog.Logger,
|
ctx context.Context,
|
||||||
ic *Controller,
|
ic *Controller,
|
||||||
nodeInformer coreinformers.NodeInformer,
|
nodeInformer coreinformers.NodeInformer,
|
||||||
cloud cloudprovider.Interface,
|
cloud cloudprovider.Interface,
|
||||||
@@ -59,10 +60,11 @@ func createLegacyIPAM(
|
|||||||
if len(clusterCIDRs) > 0 {
|
if len(clusterCIDRs) > 0 {
|
||||||
cidr = clusterCIDRs[0]
|
cidr = clusterCIDRs[0]
|
||||||
}
|
}
|
||||||
|
logger := klog.FromContext(ctx)
|
||||||
if len(clusterCIDRs) > 1 {
|
if len(clusterCIDRs) > 1 {
|
||||||
logger.Info("Multiple cidrs were configured with FromCluster or FromCloud. cidrs except first one were discarded")
|
logger.Info("Multiple cidrs were configured with FromCluster or FromCloud. cidrs except first one were discarded")
|
||||||
}
|
}
|
||||||
ipamc, err := ipam.NewController(cfg, kubeClient, cloud, cidr, serviceCIDR, nodeCIDRMaskSizes[0])
|
ipamc, err := ipam.NewController(ctx, cfg, kubeClient, cloud, cidr, serviceCIDR, nodeCIDRMaskSizes[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error creating ipam controller: %w", err)
|
return nil, fmt.Errorf("error creating ipam controller: %w", err)
|
||||||
}
|
}
|
||||||
|
@@ -88,7 +88,6 @@ func NewNodeIpamController(
|
|||||||
nodeCIDRMaskSizes []int,
|
nodeCIDRMaskSizes []int,
|
||||||
allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
|
allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
|
||||||
|
|
||||||
logger := klog.FromContext(ctx)
|
|
||||||
if kubeClient == nil {
|
if kubeClient == nil {
|
||||||
return nil, fmt.Errorf("kubeClient is nil when starting Controller")
|
return nil, fmt.Errorf("kubeClient is nil when starting Controller")
|
||||||
}
|
}
|
||||||
@@ -110,7 +109,7 @@ func NewNodeIpamController(
|
|||||||
ic := &Controller{
|
ic := &Controller{
|
||||||
cloud: cloud,
|
cloud: cloud,
|
||||||
kubeClient: kubeClient,
|
kubeClient: kubeClient,
|
||||||
eventBroadcaster: record.NewBroadcaster(),
|
eventBroadcaster: record.NewBroadcaster(record.WithContext(ctx)),
|
||||||
clusterCIDRs: clusterCIDRs,
|
clusterCIDRs: clusterCIDRs,
|
||||||
serviceCIDR: serviceCIDR,
|
serviceCIDR: serviceCIDR,
|
||||||
secondaryServiceCIDR: secondaryServiceCIDR,
|
secondaryServiceCIDR: secondaryServiceCIDR,
|
||||||
@@ -120,7 +119,7 @@ func NewNodeIpamController(
|
|||||||
// TODO: Abstract this check into a generic controller manager should run method.
|
// TODO: Abstract this check into a generic controller manager should run method.
|
||||||
if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
|
if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
|
||||||
var err error
|
var err error
|
||||||
ic.legacyIPAM, err = createLegacyIPAM(logger, ic, nodeInformer, cloud, kubeClient, clusterCIDRs, serviceCIDR, nodeCIDRMaskSizes)
|
ic.legacyIPAM, err = createLegacyIPAM(ctx, ic, nodeInformer, cloud, kubeClient, clusterCIDRs, serviceCIDR, nodeCIDRMaskSizes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -151,7 +150,7 @@ func (nc *Controller) Run(ctx context.Context) {
|
|||||||
defer utilruntime.HandleCrash()
|
defer utilruntime.HandleCrash()
|
||||||
|
|
||||||
// Start event processing pipeline.
|
// Start event processing pipeline.
|
||||||
nc.eventBroadcaster.StartStructuredLogging(0)
|
nc.eventBroadcaster.StartStructuredLogging(3)
|
||||||
nc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: nc.kubeClient.CoreV1().Events("")})
|
nc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: nc.kubeClient.CoreV1().Events("")})
|
||||||
defer nc.eventBroadcaster.Shutdown()
|
defer nc.eventBroadcaster.Shutdown()
|
||||||
klog.FromContext(ctx).Info("Starting ipam controller")
|
klog.FromContext(ctx).Info("Starting ipam controller")
|
||||||
|
@@ -27,7 +27,6 @@ import (
|
|||||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
cloudprovider "k8s.io/cloud-provider"
|
cloudprovider "k8s.io/cloud-provider"
|
||||||
"k8s.io/klog/v2"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type fakeController struct {
|
type fakeController struct {
|
||||||
@@ -38,7 +37,7 @@ func (f *fakeController) Run(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func createLegacyIPAM(
|
func createLegacyIPAM(
|
||||||
logger klog.Logger,
|
ctx context.Context,
|
||||||
ic *Controller,
|
ic *Controller,
|
||||||
nodeInformer coreinformers.NodeInformer,
|
nodeInformer coreinformers.NodeInformer,
|
||||||
cloud cloudprovider.Interface,
|
cloud cloudprovider.Interface,
|
||||||
|
@@ -323,7 +323,7 @@ func NewNodeLifecycleController(
|
|||||||
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
|
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
eventBroadcaster := record.NewBroadcaster()
|
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"})
|
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"})
|
||||||
|
|
||||||
nc := &Controller{
|
nc := &Controller{
|
||||||
@@ -454,7 +454,7 @@ func (nc *Controller) Run(ctx context.Context) {
|
|||||||
defer utilruntime.HandleCrash()
|
defer utilruntime.HandleCrash()
|
||||||
|
|
||||||
// Start events processing pipeline.
|
// Start events processing pipeline.
|
||||||
nc.broadcaster.StartStructuredLogging(0)
|
nc.broadcaster.StartStructuredLogging(3)
|
||||||
logger := klog.FromContext(ctx)
|
logger := klog.FromContext(ctx)
|
||||||
logger.Info("Sending events to api server")
|
logger.Info("Sending events to api server")
|
||||||
nc.broadcaster.StartRecordingToSink(
|
nc.broadcaster.StartRecordingToSink(
|
||||||
|
@@ -126,6 +126,7 @@ type HorizontalController struct {
|
|||||||
|
|
||||||
// NewHorizontalController creates a new HorizontalController.
|
// NewHorizontalController creates a new HorizontalController.
|
||||||
func NewHorizontalController(
|
func NewHorizontalController(
|
||||||
|
ctx context.Context,
|
||||||
evtNamespacer v1core.EventsGetter,
|
evtNamespacer v1core.EventsGetter,
|
||||||
scaleNamespacer scaleclient.ScalesGetter,
|
scaleNamespacer scaleclient.ScalesGetter,
|
||||||
hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
|
hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
|
||||||
@@ -140,8 +141,8 @@ func NewHorizontalController(
|
|||||||
delayOfInitialReadinessStatus time.Duration,
|
delayOfInitialReadinessStatus time.Duration,
|
||||||
containerResourceMetricsEnabled bool,
|
containerResourceMetricsEnabled bool,
|
||||||
) *HorizontalController {
|
) *HorizontalController {
|
||||||
broadcaster := record.NewBroadcaster()
|
broadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
broadcaster.StartStructuredLogging(0)
|
broadcaster.StartStructuredLogging(3)
|
||||||
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
|
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
|
||||||
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})
|
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})
|
||||||
|
|
||||||
|
@@ -46,6 +46,7 @@ import (
|
|||||||
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
||||||
"k8s.io/kubernetes/pkg/controller/podautoscaler/monitor"
|
"k8s.io/kubernetes/pkg/controller/podautoscaler/monitor"
|
||||||
"k8s.io/kubernetes/pkg/controller/util/selectors"
|
"k8s.io/kubernetes/pkg/controller/util/selectors"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
|
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
|
||||||
emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
|
emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
|
||||||
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||||
@@ -767,7 +768,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
|
|||||||
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
|
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
|
||||||
defaultDownscalestabilizationWindow := 5 * time.Minute
|
defaultDownscalestabilizationWindow := 5 * time.Minute
|
||||||
|
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
hpaController := NewHorizontalController(
|
hpaController := NewHorizontalController(
|
||||||
|
tCtx,
|
||||||
eventClient.CoreV1(),
|
eventClient.CoreV1(),
|
||||||
testScaleClient,
|
testScaleClient,
|
||||||
testClient.AutoscalingV2(),
|
testClient.AutoscalingV2(),
|
||||||
@@ -5292,7 +5295,9 @@ func TestMultipleHPAs(t *testing.T) {
|
|||||||
|
|
||||||
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
|
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
|
||||||
|
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
hpaController := NewHorizontalController(
|
hpaController := NewHorizontalController(
|
||||||
|
tCtx,
|
||||||
testClient.CoreV1(),
|
testClient.CoreV1(),
|
||||||
testScaleClient,
|
testScaleClient,
|
||||||
testClient.AutoscalingV2(),
|
testClient.AutoscalingV2(),
|
||||||
@@ -5310,10 +5315,8 @@ func TestMultipleHPAs(t *testing.T) {
|
|||||||
hpaController.scaleUpEvents = scaleUpEventsMap
|
hpaController.scaleUpEvents = scaleUpEventsMap
|
||||||
hpaController.scaleDownEvents = scaleDownEventsMap
|
hpaController.scaleDownEvents = scaleDownEventsMap
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
informerFactory.Start(tCtx.Done())
|
||||||
defer cancel()
|
go hpaController.Run(tCtx, 5)
|
||||||
informerFactory.Start(ctx.Done())
|
|
||||||
go hpaController.Run(ctx, 5)
|
|
||||||
|
|
||||||
timeoutTime := time.After(15 * time.Second)
|
timeoutTime := time.After(15 * time.Second)
|
||||||
timeout := false
|
timeout := false
|
||||||
|
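The HPA tests lose their hand-rolled context.WithCancel for the same reason: the ktesting context exposes Done() for the informer factory and is cancelled when the test ends, so there is nothing left to defer. In short:

	tCtx := ktesting.Init(t)
	informerFactory.Start(tCtx.Done()) // Done() replaces the channel of a manual context.WithCancel
	go hpaController.Run(tCtx, 5)      // stops automatically at test teardown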
@@ -1,25 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2019 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package replicaset
|
|
||||||
|
|
||||||
import (
|
|
||||||
"k8s.io/klog/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
klog.InitFlags(nil)
|
|
||||||
}
|
|
@@ -115,8 +115,9 @@ type ReplicaSetController struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewReplicaSetController configures a replica set controller with the specified event recorder
|
// NewReplicaSetController configures a replica set controller with the specified event recorder
|
||||||
func NewReplicaSetController(logger klog.Logger, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
|
func NewReplicaSetController(ctx context.Context, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
|
||||||
eventBroadcaster := record.NewBroadcaster()
|
logger := klog.FromContext(ctx)
|
||||||
|
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
|
||||||
if err := metrics.Register(legacyregistry.Register); err != nil {
|
if err := metrics.Register(legacyregistry.Register); err != nil {
|
||||||
logger.Error(err, "unable to register metrics")
|
logger.Error(err, "unable to register metrics")
|
||||||
}
|
}
|
||||||
@@ -202,7 +203,7 @@ func (rsc *ReplicaSetController) Run(ctx context.Context, workers int) {
|
|||||||
defer utilruntime.HandleCrash()
|
defer utilruntime.HandleCrash()
|
||||||
|
|
||||||
// Start events processing pipeline.
|
// Start events processing pipeline.
|
||||||
rsc.eventBroadcaster.StartStructuredLogging(0)
|
rsc.eventBroadcaster.StartStructuredLogging(3)
|
||||||
rsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: rsc.kubeClient.CoreV1().Events("")})
|
rsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: rsc.kubeClient.CoreV1().Events("")})
|
||||||
defer rsc.eventBroadcaster.Shutdown()
|
defer rsc.eventBroadcaster.Shutdown()
|
||||||
|
|
||||||
|
@@ -50,10 +50,10 @@ import (
 	utiltesting "k8s.io/client-go/util/testing"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
-	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	. "k8s.io/kubernetes/pkg/controller/testutil"
 	"k8s.io/kubernetes/pkg/securitycontext"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	"k8s.io/utils/pointer"
 )

@@ -64,9 +64,9 @@ var (
 func testNewReplicaSetControllerFromClient(tb testing.TB, client clientset.Interface, stopCh chan struct{}, burstReplicas int) (*ReplicaSetController, informers.SharedInformerFactory) {
 	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())

-	logger, _ := ktesting.NewTestContext(tb)
+	tCtx := ktesting.Init(tb)
 	ret := NewReplicaSetController(
-		logger,
+		tCtx,
 		informers.Apps().V1().ReplicaSets(),
 		informers.Core().V1().Pods(),
 		client,
@@ -628,9 +628,9 @@ func TestWatchControllers(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
-	logger, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	manager := NewReplicaSetController(
-		logger,
+		tCtx,
 		informers.Apps().V1().ReplicaSets(),
 		informers.Core().V1().Pods(),
 		client,
@@ -659,7 +659,7 @@ func TestWatchControllers(t *testing.T) {
 	}
 	// Start only the ReplicaSet watcher and the workqueue, send a watch event,
 	// and make sure it hits the sync method.
-	go wait.UntilWithContext(ctx, manager.worker, 10*time.Millisecond)
+	go wait.UntilWithContext(tCtx, manager.worker, 10*time.Millisecond)

 	testRSSpec.Name = "foo"
 	fakeWatch.Add(&testRSSpec)
@@ -1189,15 +1189,15 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 }

 func TestExpectationsOnRecreate(t *testing.T) {
-	_, ctx := ktesting.NewTestContext(t)
 	client := fake.NewSimpleClientset()
 	stopCh := make(chan struct{})
 	defer close(stopCh)

 	f := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
-	logger, _ := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
+	logger := tCtx.Logger()
 	manager := NewReplicaSetController(
-		logger,
+		tCtx,
 		f.Apps().V1().ReplicaSets(),
 		f.Core().V1().Pods(),
 		client,
@@ -1213,7 +1213,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 	}

 	oldRS := newReplicaSet(1, map[string]string{"foo": "bar"})
-	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(ctx, oldRS, metav1.CreateOptions{})
+	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(tCtx, oldRS, metav1.CreateOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1226,7 +1226,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		t.Fatalf("initial RS didn't result in new item in the queue: %v", err)
 	}

-	ok := manager.processNextWorkItem(ctx)
+	ok := manager.processNextWorkItem(tCtx)
 	if !ok {
 		t.Fatal("queue is shutting down")
 	}
@@ -1257,7 +1257,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		t.Fatal("Unexpected item in the queue")
 	}

-	err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(ctx, oldRS.Name, metav1.DeleteOptions{})
+	err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(tCtx, oldRS.Name, metav1.DeleteOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1294,7 +1294,7 @@ func TestExpectationsOnRecreate(t *testing.T) {

 	newRS := oldRS.DeepCopy()
 	newRS.UID = uuid.NewUUID()
-	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(ctx, newRS, metav1.CreateOptions{})
+	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(tCtx, newRS, metav1.CreateOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1312,7 +1312,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		t.Fatalf("Re-creating RS didn't result in new item in the queue: %v", err)
 	}

-	ok = manager.processNextWorkItem(ctx)
+	ok = manager.processNextWorkItem(tCtx)
 	if !ok {
 		t.Fatal("Queue is shutting down!")
 	}
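The test hunks above replace ktesting.NewTestContext plus manual context.WithCancel with a single per-test context. A minimal sketch using only the calls that appear in this diff (Init, Logger, Cancel, Done); the test name and log message are placeholders:

	package example

	import (
		"testing"

		"k8s.io/kubernetes/test/utils/ktesting"
	)

	func TestSketch(t *testing.T) {
		// tCtx is a context.Context tied to the test, so it can be passed
		// directly to constructors and client calls that expect a ctx.
		tCtx := ktesting.Init(t)
		defer tCtx.Cancel("test has completed")

		logger := tCtx.Logger()
		logger.Info("sketch only")

		select {
		case <-tCtx.Done():
			t.Fatal("context should not be cancelled yet")
		default:
		}
	}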
@@ -26,6 +26,8 @@ limitations under the License.
 package replication

 import (
+	"context"
+
 	v1 "k8s.io/api/core/v1"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -48,8 +50,9 @@ type ReplicationManager struct {
 }

 // NewReplicationManager configures a replication manager with the specified event recorder
-func NewReplicationManager(logger klog.Logger, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager {
-	eventBroadcaster := record.NewBroadcaster()
+func NewReplicationManager(ctx context.Context, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager {
+	logger := klog.FromContext(ctx)
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	return &ReplicationManager{
 		*replicaset.NewBaseController(logger, informerAdapter{rcInformer}, podInformer, clientsetAdapter{kubeClient}, burstReplicas,
 			v1.SchemeGroupVersion.WithKind("ReplicationController"),
@@ -394,7 +394,7 @@ func (ec *Controller) Run(ctx context.Context, workers int) {
 	logger.Info("Starting ephemeral volume controller")
 	defer logger.Info("Shutting down ephemeral volume controller")

-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ec.kubeClient.CoreV1().Events("")})
 	ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "resource_claim"})
@@ -67,11 +67,12 @@ const (

 // NewController returns a new *Controller.
 func NewController(
+	ctx context.Context,
 	serviceCIDRInformer networkinginformers.ServiceCIDRInformer,
 	ipAddressInformer networkinginformers.IPAddressInformer,
 	client clientset.Interface,
 ) *Controller {
-	broadcaster := record.NewBroadcaster()
+	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
 	c := &Controller{
 		client: client,
@@ -129,7 +130,7 @@ func (c *Controller) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()

-	c.eventBroadcaster.StartStructuredLogging(0)
+	c.eventBroadcaster.StartStructuredLogging(3)
 	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
 	defer c.eventBroadcaster.Shutdown()

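The same broadcaster setup recurs in several controllers touched by this commit; a condensed sketch of the pattern, with example-controller as a placeholder component name:

	package example

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		"k8s.io/client-go/kubernetes/scheme"
		"k8s.io/client-go/tools/record"
	)

	// newEventPipeline mirrors the hunks above: the broadcaster takes its
	// logger from ctx via record.WithContext, and events are written as
	// structured log entries at verbosity 3 rather than 0.
	func newEventPipeline(ctx context.Context) (record.EventRecorder, record.EventBroadcaster) {
		broadcaster := record.NewBroadcaster(record.WithContext(ctx))
		broadcaster.StartStructuredLogging(3)
		recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
		return recorder, broadcaster
	}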
@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr"
 	"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	netutils "k8s.io/utils/net"
 	"k8s.io/utils/ptr"
 )
@@ -43,7 +44,7 @@ type testController struct {
 	ipaddressesStore cache.Store
 }

-func newController(t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips []*networkingapiv1alpha1.IPAddress) (*fake.Clientset, *testController) {
+func newController(ctx context.Context, t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips []*networkingapiv1alpha1.IPAddress) (*fake.Clientset, *testController) {
 	client := fake.NewSimpleClientset()

 	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
@@ -65,6 +66,7 @@ func newController(t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips
 		}
 	}
 	controller := NewController(
+		ctx,
 		serviceCIDRInformer,
 		ipAddressInformer,
 		client)
@@ -233,11 +235,12 @@ func TestControllerSync(t *testing.T) {

 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			client, controller := newController(t, tc.cidrs, tc.ips)
+			tCtx := ktesting.Init(t)
+			client, controller := newController(tCtx, t, tc.cidrs, tc.ips)
 			// server side apply does not play well with fake client go
 			// so we skup the errors and only assert on the actions
 			// https://github.com/kubernetes/kubernetes/issues/99953
-			_ = controller.sync(context.Background(), tc.cidrSynced)
+			_ = controller.sync(tCtx, tc.cidrSynced)
 			expectAction(t, client.Actions(), tc.actions)

 		})
@@ -423,13 +426,14 @@ func TestController_canDeleteCIDR(t *testing.T) {
 	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			_, controller := newController(t, tc.cidrs, tc.ips)
+			tCtx := ktesting.Init(t)
+			_, controller := newController(tCtx, t, tc.cidrs, tc.ips)
 			err := controller.syncCIDRs()
 			if err != nil {
 				t.Fatal(err)
 			}

-			got, err := controller.canDeleteCIDR(context.Background(), tc.cidrSynced)
+			got, err := controller.canDeleteCIDR(tCtx, tc.cidrSynced)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -528,7 +532,8 @@ func TestController_ipToCidrs(t *testing.T) {
 	}}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			_, controller := newController(t, tt.cidrs, nil)
+			tCtx := ktesting.Init(t)
+			_, controller := newController(tCtx, t, tt.cidrs, nil)
 			err := controller.syncCIDRs()
 			if err != nil {
 				t.Fatal(err)
@@ -584,7 +589,8 @@ func TestController_cidrToCidrs(t *testing.T) {
 	}}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			_, controller := newController(t, tt.cidrs, nil)
+			tCtx := ktesting.Init(t)
+			_, controller := newController(tCtx, t, tt.cidrs, nil)
 			err := controller.syncCIDRs()
 			if err != nil {
 				t.Fatal(err)
@@ -86,7 +86,7 @@ func NewStatefulSetController(
 	kubeClient clientset.Interface,
 ) *StatefulSetController {
 	logger := klog.FromContext(ctx)
-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"})
 	ssc := &StatefulSetController{
 		kubeClient: kubeClient,
@@ -151,7 +151,7 @@ func (ssc *StatefulSetController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()

 	// Start events processing pipeline.
-	ssc.eventBroadcaster.StartStructuredLogging(0)
+	ssc.eventBroadcaster.StartStructuredLogging(3)
 	ssc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ssc.kubeClient.CoreV1().Events("")})
 	defer ssc.eventBroadcaster.Shutdown()

@@ -188,7 +188,7 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration {
 func New(ctx context.Context, c clientset.Interface, podInformer corev1informers.PodInformer, nodeInformer corev1informers.NodeInformer, controllerName string) (*Controller, error) {
 	logger := klog.FromContext(ctx)
 	metrics.Register()
-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})

 	podIndexer := podInformer.Informer().GetIndexer()
@@ -286,7 +286,7 @@ func (tc *Controller) Run(ctx context.Context) {
 	defer logger.Info("Shutting down controller", "controller", tc.name)

 	// Start events processing pipeline.
-	tc.broadcaster.StartStructuredLogging(0)
+	tc.broadcaster.StartStructuredLogging(3)
 	if tc.client != nil {
 		logger.Info("Sending events to api server")
 		tc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: tc.client.CoreV1().Events("")})
@@ -70,8 +70,8 @@ type Controller struct {

 // New creates an instance of Controller
 func New(ctx context.Context, jobInformer batchinformers.JobInformer, client clientset.Interface) *Controller {
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartStructuredLogging(0)
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
+	eventBroadcaster.StartStructuredLogging(3)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

 	metrics.Register()
@@ -106,7 +106,7 @@ type AttachDetachController interface {

 // NewAttachDetachController returns a new instance of AttachDetachController.
 func NewAttachDetachController(
-	logger klog.Logger,
+	ctx context.Context,
 	kubeClient clientset.Interface,
 	podInformer coreinformers.PodInformer,
 	nodeInformer coreinformers.NodeInformer,
@@ -123,6 +123,8 @@ func NewAttachDetachController(
 	disableForceDetachOnTimeout bool,
 	timerConfig TimerConfig) (AttachDetachController, error) {

+	logger := klog.FromContext(ctx)
+
 	adc := &attachDetachController{
 		kubeClient: kubeClient,
 		pvcLister:  pvcInformer.Lister(),
@@ -151,7 +153,7 @@ func NewAttachDetachController(
 		return nil, fmt.Errorf("could not initialize volume plugins for Attach/Detach Controller: %w", err)
 	}

-	adc.broadcaster = record.NewBroadcaster()
+	adc.broadcaster = record.NewBroadcaster(record.WithContext(ctx))
 	recorder := adc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"})
 	blkutil := volumepathhandler.NewBlockVolumePathHandler()

@@ -332,7 +334,7 @@ func (adc *attachDetachController) Run(ctx context.Context) {
 	defer adc.pvcQueue.ShutDown()

 	// Start events processing pipeline.
-	adc.broadcaster.StartStructuredLogging(0)
+	adc.broadcaster.StartStructuredLogging(3)
 	adc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: adc.kubeClient.CoreV1().Events("")})
 	defer adc.broadcaster.Shutdown()

@@ -30,13 +30,13 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	kcache "k8s.io/client-go/tools/cache"
-	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/csi"
 	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )

 const (
@@ -50,9 +50,9 @@ func Test_NewAttachDetachController_Positive(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())

 	// Act
-	logger, _ := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	_, err := NewAttachDetachController(
-		logger,
+		tCtx,
 		fakeKubeClient,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Nodes(),
@@ -81,11 +81,9 @@ func Test_AttachDetachControllerStateOfWorldPopulators_Positive(t *testing.T) {
 	fakeKubeClient := controllervolumetesting.CreateTestClient()
 	informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())

-	logger, ctx := ktesting.NewTestContext(t)
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	logger, tCtx := ktesting.NewTestContext(t)
 	adcObj, err := NewAttachDetachController(
-		logger,
+		tCtx,
 		fakeKubeClient,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Nodes(),
@@ -109,8 +107,8 @@ func Test_AttachDetachControllerStateOfWorldPopulators_Positive(t *testing.T) {
 	adc := adcObj.(*attachDetachController)

 	// Act
-	informerFactory.Start(ctx.Done())
-	informerFactory.WaitForCacheSync(ctx.Done())
+	informerFactory.Start(tCtx.Done())
+	informerFactory.WaitForCacheSync(tCtx.Done())

 	err = adc.populateActualStateOfWorld(logger)
 	if err != nil {
@@ -206,11 +204,9 @@ func BenchmarkPopulateActualStateOfWorld(b *testing.B) {
 	}
 	informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())

-	logger, ctx := ktesting.NewTestContext(b)
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	logger, tCtx := ktesting.NewTestContext(b)
 	adcObj, err := NewAttachDetachController(
-		logger,
+		tCtx,
 		fakeKubeClient,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Nodes(),
@@ -234,8 +230,8 @@ func BenchmarkPopulateActualStateOfWorld(b *testing.B) {
 	adc := adcObj.(*attachDetachController)

 	// Act
-	informerFactory.Start(ctx.Done())
-	informerFactory.WaitForCacheSync(ctx.Done())
+	informerFactory.Start(tCtx.Done())
+	informerFactory.WaitForCacheSync(tCtx.Done())

 	b.ResetTimer()
 	err = adc.populateActualStateOfWorld(logger)
@@ -267,11 +263,9 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 	var podsNum, extraPodsNum, nodesNum, i int

 	// Create the controller
-	logger, ctx := ktesting.NewTestContext(t)
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	logger, tCtx := ktesting.NewTestContext(t)
 	adcObj, err := NewAttachDetachController(
-		logger,
+		tCtx,
 		fakeKubeClient,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Nodes(),
@@ -295,7 +289,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2

 	adc := adcObj.(*attachDetachController)

-	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
+	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(tCtx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 	}
@@ -305,7 +299,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 		podInformer.GetIndexer().Add(&podToAdd)
 		podsNum++
 	}
-	nodes, err := fakeKubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	nodes, err := fakeKubeClient.CoreV1().Nodes().List(tCtx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 	}
@@ -315,7 +309,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 		nodesNum++
 	}

-	csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(context.TODO(), metav1.ListOptions{})
+	csiNodes, err := fakeKubeClient.StorageV1().CSINodes().List(tCtx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 	}
@@ -324,9 +318,9 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 		csiNodeInformer.GetIndexer().Add(&csiNodeToAdd)
 	}

-	informerFactory.Start(ctx.Done())
+	informerFactory.Start(tCtx.Done())

-	if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(),
+	if !kcache.WaitForNamedCacheSync("attach detach", tCtx.Done(),
 		informerFactory.Core().V1().Pods().Informer().HasSynced,
 		informerFactory.Core().V1().Nodes().Informer().HasSynced,
 		informerFactory.Storage().V1().CSINodes().Informer().HasSynced) {
@@ -382,7 +376,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2

 	for _, newPod := range extraPods1 {
 		// Add a new pod between ASW and DSW ppoulators
-		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{})
+		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(tCtx, newPod, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
 		}
@@ -399,7 +393,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2

 	for _, newPod := range extraPods2 {
 		// Add a new pod between DSW ppoulator and reconciler run
-		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{})
+		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(tCtx, newPod, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
 		}
@@ -407,8 +401,8 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 		podInformer.GetIndexer().Add(newPod)
 	}

-	go adc.reconciler.Run(ctx)
-	go adc.desiredStateOfWorldPopulator.Run(ctx)
+	go adc.reconciler.Run(tCtx)
+	go adc.desiredStateOfWorldPopulator.Run(tCtx)

 	time.Sleep(time.Second * 1) // Wait so the reconciler calls sync at least once

@@ -533,11 +527,9 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 	vaInformer := informerFactory.Storage().V1().VolumeAttachments().Informer()

 	// Create the controller
-	logger, ctx := ktesting.NewTestContext(t)
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	logger, tCtx := ktesting.NewTestContext(t)
 	adcObj, err := NewAttachDetachController(
-		logger,
+		tCtx,
 		fakeKubeClient,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Nodes(),
@@ -560,7 +552,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 	adc := adcObj.(*attachDetachController)

 	// Add existing objects (created by testplugin) to the respective informers
-	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
+	pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(tCtx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 	}
@@ -568,7 +560,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 		podToAdd := pod
 		podInformer.GetIndexer().Add(&podToAdd)
 	}
-	nodes, err := fakeKubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	nodes, err := fakeKubeClient.CoreV1().Nodes().List(tCtx, metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 	}
@@ -597,7 +589,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 				},
 			},
 		}
-		_, err = adc.kubeClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{})
+		_, err = adc.kubeClient.CoreV1().Nodes().Update(tCtx, newNode, metav1.UpdateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
 		}
@@ -606,7 +598,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 	// Create and add objects requested by the test
 	if tc.podName != "" {
 		newPod := controllervolumetesting.NewPodWithVolume(tc.podName, tc.volName, tc.podNodeName)
-		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(context.TODO(), newPod, metav1.CreateOptions{})
+		_, err = adc.kubeClient.CoreV1().Pods(newPod.ObjectMeta.Namespace).Create(tCtx, newPod, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new pod: <%v>", err)
 		}
@@ -621,7 +613,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 			// Otherwise use NFS, which is not subject to migration.
 			newPv = controllervolumetesting.NewNFSPV(tc.pvName, tc.volName)
 		}
-		_, err = adc.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), newPv, metav1.CreateOptions{})
+		_, err = adc.kubeClient.CoreV1().PersistentVolumes().Create(tCtx, newPv, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new pv: <%v>", err)
 		}
@@ -629,7 +621,7 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 	}
 	if tc.vaName != "" {
 		newVa := controllervolumetesting.NewVolumeAttachment(tc.vaName, tc.pvName, tc.vaNodeName, tc.vaAttachStatus)
-		_, err = adc.kubeClient.StorageV1().VolumeAttachments().Create(context.TODO(), newVa, metav1.CreateOptions{})
+		_, err = adc.kubeClient.StorageV1().VolumeAttachments().Create(tCtx, newVa, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Run failed with error. Failed to create a new volumeAttachment: <%v>", err)
 		}
@@ -637,9 +629,9 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 	}

 	// Makesure the informer cache is synced
-	informerFactory.Start(ctx.Done())
+	informerFactory.Start(tCtx.Done())

-	if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(),
+	if !kcache.WaitForNamedCacheSync("attach detach", tCtx.Done(),
 		informerFactory.Core().V1().Pods().Informer().HasSynced,
 		informerFactory.Core().V1().Nodes().Informer().HasSynced,
 		informerFactory.Core().V1().PersistentVolumes().Informer().HasSynced,
@@ -659,8 +651,8 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 		t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 	}
 	// Run reconciler and DSW populator loops
-	go adc.reconciler.Run(ctx)
-	go adc.desiredStateOfWorldPopulator.Run(ctx)
+	go adc.reconciler.Run(tCtx)
+	go adc.desiredStateOfWorldPopulator.Run(tCtx)
 	if tc.csiMigration {
 		verifyExpectedVolumeState(t, adc, tc)
 	} else {
@@ -76,6 +76,7 @@ type ephemeralController struct {

 // NewController creates an ephemeral volume controller.
 func NewController(
+	ctx context.Context,
 	kubeClient clientset.Interface,
 	podInformer coreinformers.PodInformer,
 	pvcInformer coreinformers.PersistentVolumeClaimInformer) (Controller, error) {
@@ -92,7 +93,7 @@ func NewController(

 	ephemeralvolumemetrics.RegisterMetrics()

-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 	ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ephemeral_volume"})
@@ -22,6 +22,7 @@ import (
 	"sort"
 	"testing"

+	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,8 +36,6 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
 	ephemeralvolumemetrics "k8s.io/kubernetes/pkg/controller/volume/ephemeral/metrics"
-
-	"github.com/stretchr/testify/assert"
 )

 var (
@@ -146,7 +145,7 @@ func TestSyncHandler(t *testing.T) {
 	podInformer := informerFactory.Core().V1().Pods()
 	pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()

-	c, err := NewController(fakeKubeClient, podInformer, pvcInformer)
+	c, err := NewController(ctx, fakeKubeClient, podInformer, pvcInformer)
 	if err != nil {
 		t.Fatalf("error creating ephemeral controller : %v", err)
 	}
@@ -100,6 +100,7 @@ type expandController struct {

 // NewExpandController expands the pvs
 func NewExpandController(
+	ctx context.Context,
 	kubeClient clientset.Interface,
 	pvcInformer coreinformers.PersistentVolumeClaimInformer,
 	cloud cloudprovider.Interface,
@@ -121,8 +122,8 @@ func NewExpandController(
 		return nil, fmt.Errorf("could not initialize volume plugins for Expand Controller : %+v", err)
 	}

-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartStructuredLogging(0)
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
+	eventBroadcaster.StartStructuredLogging(3)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 	expc.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
 	blkutil := volumepathhandler.NewBlockVolumePathHandler()
@@ -42,6 +42,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )

 func TestSyncHandler(t *testing.T) {
@@ -91,6 +92,7 @@ func TestSyncHandler(t *testing.T) {
 	}

 	for _, tc := range tests {
+		tCtx := ktesting.Init(t)
 		test := tc
 		fakeKubeClient := controllervolumetesting.CreateTestClient()
 		informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
@@ -106,7 +108,7 @@ func TestSyncHandler(t *testing.T) {
 		}
 		allPlugins := []volume.VolumePlugin{}
 		translator := csitrans.New()
-		expc, err := NewExpandController(fakeKubeClient, pvcInformer, nil, allPlugins, translator, csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate))
+		expc, err := NewExpandController(tCtx, fakeKubeClient, pvcInformer, nil, allPlugins, translator, csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate))
 		if err != nil {
 			t.Fatalf("error creating expand controller : %v", err)
 		}
@@ -77,7 +77,7 @@ type ControllerParameters struct {

 // NewController creates a new PersistentVolume controller
 func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolumeController, error) {
-	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
 	eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})

 	controller := &PersistentVolumeController{
@@ -305,7 +305,7 @@ func (ctrl *PersistentVolumeController) Run(ctx context.Context) {
 	defer ctrl.volumeQueue.ShutDown()

 	// Start events processing pipeline.
-	ctrl.eventBroadcaster.StartStructuredLogging(0)
+	ctrl.eventBroadcaster.StartStructuredLogging(3)
 	ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")})
 	defer ctrl.eventBroadcaster.Shutdown()

@@ -30,11 +30,11 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	clientbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
 	restclient "k8s.io/client-go/rest"
-	"k8s.io/klog/v2/ktesting"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	"k8s.io/kubernetes/pkg/controller/cronjob"
 	"k8s.io/kubernetes/pkg/controller/job"
 	"k8s.io/kubernetes/test/integration/framework"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )

 func setup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFunc, *cronjob.ControllerV2, *job.Controller, informers.SharedInformerFactory, clientset.Interface) {
@@ -148,16 +148,13 @@ func validateJobAndPod(t *testing.T, clientSet clientset.Interface, namespace st
 }

 func TestCronJobLaunchesPodAndCleansUp(t *testing.T) {
-	_, ctx := ktesting.NewTestContext(t)
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	tCtx := ktesting.Init(t)

-	closeFn, cjc, jc, informerSet, clientSet := setup(ctx, t)
+	closeFn, cjc, jc, informerSet, clientSet := setup(tCtx, t)
 	defer closeFn()

 	// When shutting down, cancel must be called before closeFn.
-	// We simply call it multiple times.
-	defer cancel()
+	defer tCtx.Cancel("test has completed")

 	cronJobName := "foo"
 	namespaceName := "simple-cronjob-test"
@@ -167,11 +164,11 @@ func TestCronJobLaunchesPodAndCleansUp(t *testing.T) {

 	cjClient := clientSet.BatchV1().CronJobs(ns.Name)

-	informerSet.Start(ctx.Done())
-	go cjc.Run(ctx, 1)
-	go jc.Run(ctx, 1)
+	informerSet.Start(tCtx.Done())
+	go cjc.Run(tCtx, 1)
+	go jc.Run(tCtx, 1)

-	_, err := cjClient.Create(context.TODO(), newCronJob(cronJobName, ns.Name, "* * * * ?"), metav1.CreateOptions{})
+	_, err := cjClient.Create(tCtx, newCronJob(cronJobName, ns.Name, "* * * * ?"), metav1.CreateOptions{})
 	if err != nil {
 		t.Fatalf("Failed to create CronJob: %v", err)
 	}
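The cancel-before-closeFn comment in the hunk above relies on Go's last-in, first-out defer order; a tiny sketch with a stand-in closeFn (not the real teardown helper):

	package example

	import (
		"testing"

		"k8s.io/kubernetes/test/utils/ktesting"
	)

	func TestShutdownOrder(t *testing.T) {
		tCtx := ktesting.Init(t)
		closeFn := func() { /* stand-in for the test API server teardown */ }

		defer closeFn()                         // registered first, runs second
		defer tCtx.Cancel("test has completed") // registered last, runs first
	}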
@@ -30,7 +30,6 @@ import (
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
-	"k8s.io/klog/v2/ktesting"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller/deployment"
@@ -105,7 +104,6 @@ func newDeployment(name, ns string, replicas int32) *apps.Deployment {
 func dcSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
-	logger, _ := ktesting.NewTestContext(t)

 	config := restclient.CopyConfig(server.ClientConfig)
 	clientSet, err := clientset.NewForConfig(config)
@@ -126,7 +124,7 @@ func dcSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFu
 		t.Fatalf("error creating Deployment controller: %v", err)
 	}
 	rm := replicaset.NewReplicaSetController(
-		logger,
+		ctx,
 		informers.Apps().V1().ReplicaSets(),
 		informers.Core().V1().Pods(),
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replicaset-controller")),
@@ -539,14 +539,15 @@ func createPDBUsingRemovedAPI(ctx context.Context, etcdClient *clientv3.Client,
 }

 func TestPatchCompatibility(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	s, pdbc, _, clientSet, _, _ := setup(ctx, t)
+	tCtx := ktesting.Init(t)
+	s, pdbc, _, clientSet, _, _ := setup(tCtx, t)
 	defer s.TearDownFn()
 	// Even though pdbc isn't used in this test, its creation is already
 	// spawning some goroutines. So we need to run it to ensure they won't leak.
-	cancel()
-	pdbc.Run(ctx)
+	// We can't cancel immediately but later, because when the context is canceled,
+	// the event broadcaster will be shut down .
+	defer tCtx.Cancel("cleaning up")
+	go pdbc.Run(tCtx)

 	testcases := []struct {
 		name string
@@ -17,7 +17,6 @@ limitations under the License.
 package dualstack
 
 import (
-    "context"
     "fmt"
     "testing"
     "time"
@@ -44,11 +43,9 @@ func TestDualStackEndpoints(t *testing.T) {
         return map[string]string{"foo": "bar"}
     }
 
-    _, ctx := ktesting.NewTestContext(t)
-    ctx, cancel := context.WithCancel(ctx)
-    defer cancel()
+    tCtx := ktesting.Init(t)
 
-    client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
+    client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
         ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
             opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
             // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
@@ -59,7 +56,7 @@ func TestDualStackEndpoints(t *testing.T) {
 
     // Wait until the default "kubernetes" service is created.
     if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
-        _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
+        _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
         if err != nil && !apierrors.IsNotFound(err) {
             return false, err
         }
@@ -88,11 +85,12 @@ func TestDualStackEndpoints(t *testing.T) {
             },
         },
     }
-    if _, err := client.CoreV1().Nodes().Create(ctx, testNode, metav1.CreateOptions{}); err != nil {
+    if _, err := client.CoreV1().Nodes().Create(tCtx, testNode, metav1.CreateOptions{}); err != nil {
         t.Fatalf("Failed to create Node %q: %v", testNode.Name, err)
     }
 
     epController := endpoint.NewEndpointController(
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Endpoints(),
@@ -100,7 +98,7 @@ func TestDualStackEndpoints(t *testing.T) {
         1*time.Second)
 
     epsController := endpointslice.NewController(
-        ctx,
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Nodes(),
@@ -110,10 +108,10 @@ func TestDualStackEndpoints(t *testing.T) {
         1*time.Second)
 
     // Start informer and controllers
-    informers.Start(ctx.Done())
+    informers.Start(tCtx.Done())
     // use only one worker to serialize the updates
-    go epController.Run(ctx, 1)
-    go epsController.Run(ctx, 1)
+    go epController.Run(tCtx, 1)
+    go epsController.Run(tCtx, 1)
 
     var testcases = []struct {
         name string
@@ -170,7 +168,7 @@ func TestDualStackEndpoints(t *testing.T) {
         },
     }
 
-    createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{})
+    createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
     }
@@ -181,7 +179,7 @@ func TestDualStackEndpoints(t *testing.T) {
         Phase: v1.PodRunning,
         PodIPs: []v1.PodIP{{IP: podIPbyFamily[v1.IPv4Protocol]}, {IP: podIPbyFamily[v1.IPv6Protocol]}},
     }
-    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{})
+    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
     }
@@ -209,7 +207,7 @@ func TestDualStackEndpoints(t *testing.T) {
     }
 
     // create a service
-    _, err = client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{})
+    _, err = client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Error creating service: %v", err)
     }
@@ -218,7 +216,7 @@ func TestDualStackEndpoints(t *testing.T) {
     // legacy endpoints are not dual stack
     // and use the address of the first IP family
     if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-        e, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
+        e, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{})
         if err != nil {
             t.Logf("Error fetching endpoints: %v", err)
             return false, nil
@@ -240,7 +238,7 @@ func TestDualStackEndpoints(t *testing.T) {
     // wait until the endpoint slices are created
     err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
         lSelector := discovery.LabelServiceName + "=" + svc.Name
-        esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector})
+        esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(tCtx, metav1.ListOptions{LabelSelector: lSelector})
         if err != nil {
             t.Logf("Error listing EndpointSlices: %v", err)
             return false, nil
@@ -17,7 +17,6 @@ limitations under the License.
 package endpoints
 
 import (
-    "context"
     "errors"
     "fmt"
     "testing"
@@ -33,6 +32,7 @@ import (
     kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
     "k8s.io/kubernetes/pkg/controller/endpoint"
     "k8s.io/kubernetes/test/integration/framework"
+    "k8s.io/kubernetes/test/utils/ktesting"
 )
 
 func TestEndpointUpdates(t *testing.T) {
@@ -47,7 +47,9 @@ func TestEndpointUpdates(t *testing.T) {
 
     informers := informers.NewSharedInformerFactory(client, 0)
 
+    tCtx := ktesting.Init(t)
     epController := endpoint.NewEndpointController(
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Endpoints(),
@@ -55,10 +57,8 @@ func TestEndpointUpdates(t *testing.T) {
         0)
 
     // Start informer and controllers
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    informers.Start(ctx.Done())
-    go epController.Run(ctx, 1)
+    informers.Start(tCtx.Done())
+    go epController.Run(tCtx, 1)
 
     // Create namespace
     ns := framework.CreateNamespaceOrDie(client, "test-endpoints-updates", t)
@@ -82,7 +82,7 @@ func TestEndpointUpdates(t *testing.T) {
         },
     }
 
-    createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{})
+    createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
     }
@@ -92,14 +92,14 @@ func TestEndpointUpdates(t *testing.T) {
         Phase: v1.PodRunning,
         PodIPs: []v1.PodIP{{IP: "1.1.1.1"}, {IP: "2001:db8::"}},
     }
-    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{})
+    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
     }
 
     // Create a service associated to the pod
     svc := newService(ns.Name, "foo1")
-    svc1, err := client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{})
+    svc1, err := client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create service %s: %v", svc.Name, err)
     }
@@ -107,7 +107,7 @@ func TestEndpointUpdates(t *testing.T) {
     // Obtain ResourceVersion of the new endpoint created
     var resVersion string
     if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
+        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{})
         if err != nil {
             t.Logf("error fetching endpoints: %v", err)
             return false, nil
@@ -120,7 +120,7 @@ func TestEndpointUpdates(t *testing.T) {
 
     // Force recomputation on the endpoint controller
     svc1.SetAnnotations(map[string]string{"foo": "bar"})
-    _, err = client.CoreV1().Services(ns.Name).Update(ctx, svc1, metav1.UpdateOptions{})
+    _, err = client.CoreV1().Services(ns.Name).Update(tCtx, svc1, metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Failed to update service %s: %v", svc1.Name, err)
     }
@@ -130,13 +130,13 @@ func TestEndpointUpdates(t *testing.T) {
     // was recomputed before asserting, since we only have 1 worker
     // in the endpoint controller
     svc2 := newService(ns.Name, "foo2")
-    _, err = client.CoreV1().Services(ns.Name).Create(ctx, svc2, metav1.CreateOptions{})
+    _, err = client.CoreV1().Services(ns.Name).Create(tCtx, svc2, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create service %s: %v", svc.Name, err)
     }
 
     if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-        _, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc2.Name, metav1.GetOptions{})
+        _, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc2.Name, metav1.GetOptions{})
         if err != nil {
             t.Logf("error fetching endpoints: %v", err)
             return false, nil
@@ -148,7 +148,7 @@ func TestEndpointUpdates(t *testing.T) {
 
     // the endpoint controller should not update the endpoint created for the original
     // service since nothing has changed, the resource version has to be the same
-    endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
+    endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{})
     if err != nil {
         t.Fatalf("error fetching endpoints: %v", err)
     }
@@ -173,7 +173,9 @@ func TestExternalNameToClusterIPTransition(t *testing.T) {
 
     informers := informers.NewSharedInformerFactory(client, 0)
 
+    tCtx := ktesting.Init(t)
     epController := endpoint.NewEndpointController(
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Endpoints(),
@@ -181,10 +183,8 @@ func TestExternalNameToClusterIPTransition(t *testing.T) {
         0)
 
     // Start informer and controllers
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    informers.Start(ctx.Done())
-    go epController.Run(ctx, 1)
+    informers.Start(tCtx.Done())
+    go epController.Run(tCtx, 1)
 
     // Create namespace
     ns := framework.CreateNamespaceOrDie(client, "test-endpoints-updates", t)
@@ -208,7 +208,7 @@ func TestExternalNameToClusterIPTransition(t *testing.T) {
         },
     }
 
-    createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{})
+    createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
     }
@@ -218,20 +218,20 @@ func TestExternalNameToClusterIPTransition(t *testing.T) {
         Phase: v1.PodRunning,
         PodIPs: []v1.PodIP{{IP: "1.1.1.1"}, {IP: "2001:db8::"}},
     }
-    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{})
+    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
     }
 
     // Create an ExternalName service associated to the pod
     svc := newExternalNameService(ns.Name, "foo1")
-    svc1, err := client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{})
+    svc1, err := client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create service %s: %v", svc.Name, err)
     }
 
     err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
-        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
+        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{})
         if err == nil {
             t.Errorf("expected no endpoints for externalName service, got: %v", endpoints)
             return true, nil
@@ -244,13 +244,13 @@ func TestExternalNameToClusterIPTransition(t *testing.T) {
 
     // update service to ClusterIP type and verify endpoint was created
     svc1.Spec.Type = v1.ServiceTypeClusterIP
-    _, err = client.CoreV1().Services(ns.Name).Update(ctx, svc1, metav1.UpdateOptions{})
+    _, err = client.CoreV1().Services(ns.Name).Update(tCtx, svc1, metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Failed to update service %s: %v", svc1.Name, err)
     }
 
     if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-        ep, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc1.Name, metav1.GetOptions{})
+        ep, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc1.Name, metav1.GetOptions{})
         if err != nil {
             t.Logf("no endpoints found, error: %v", err)
             return false, nil
@@ -282,7 +282,9 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
 
     informers := informers.NewSharedInformerFactory(client, 0)
 
+    tCtx := ktesting.Init(t)
     epController := endpoint.NewEndpointController(
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Endpoints(),
@@ -290,10 +292,8 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
         0)
 
     // Start informer and controllers
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    informers.Start(ctx.Done())
-    go epController.Run(ctx, 1)
+    informers.Start(tCtx.Done())
+    go epController.Run(tCtx, 1)
 
     // Create namespace
     ns := framework.CreateNamespaceOrDie(client, "test-endpoints-terminating", t)
@@ -337,13 +337,13 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
         },
     }
 
-    createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{})
+    createdPod, err := client.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
     }
 
     createdPod.Status = pod.Status
-    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{})
+    _, err = client.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, createdPod, metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
     }
@@ -366,14 +366,14 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
             },
         },
     }
-    _, err = client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{})
+    _, err = client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Failed to create service %s: %v", svc.Name, err)
     }
 
     // poll until associated Endpoints to the previously created Service exists
     if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
-        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
+        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{})
         if err != nil {
             return false, nil
         }
@@ -392,7 +392,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
         t.Fatalf("endpoints not found: %v", err)
     }
 
-    err = client.CoreV1().Pods(ns.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{})
+    err = client.CoreV1().Pods(ns.Name).Delete(tCtx, pod.Name, metav1.DeleteOptions{})
     if err != nil {
         t.Fatalf("error deleting test pod: %v", err)
     }
@@ -401,7 +401,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
     if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
         // Ensure that the recently deleted Pod exists but with a deletion timestamp. If the Pod does not exist,
         // we should fail the test since it is no longer validating against a terminating pod.
-        pod, err := client.CoreV1().Pods(ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
+        pod, err := client.CoreV1().Pods(ns.Name).Get(tCtx, pod.Name, metav1.GetOptions{})
         if apierrors.IsNotFound(err) {
             return false, fmt.Errorf("expected Pod %q to exist with deletion timestamp but was not found: %v", pod.Name, err)
         }
@@ -413,7 +413,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
             return false, errors.New("pod did not have deletion timestamp set")
         }
 
-        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
+        endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(tCtx, svc.Name, metav1.GetOptions{})
         if err != nil {
             return false, nil
         }
@@ -30,12 +30,12 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/klog/v2/ktesting"
     kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
     "k8s.io/kubernetes/pkg/controller/endpoint"
     "k8s.io/kubernetes/pkg/controller/endpointslice"
     "k8s.io/kubernetes/pkg/controller/endpointslicemirroring"
     "k8s.io/kubernetes/test/integration/framework"
+    "k8s.io/kubernetes/test/utils/ktesting"
 )
 
 func TestEndpointSliceMirroring(t *testing.T) {
@@ -48,11 +48,12 @@ func TestEndpointSliceMirroring(t *testing.T) {
         t.Fatalf("Error creating clientset: %v", err)
     }
 
-    ctx, cancel := context.WithCancel(context.Background())
+    tCtx := ktesting.Init(t)
     resyncPeriod := 12 * time.Hour
     informers := informers.NewSharedInformerFactory(client, resyncPeriod)
 
     epController := endpoint.NewEndpointController(
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Endpoints(),
@@ -60,7 +61,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
         1*time.Second)
 
     epsController := endpointslice.NewController(
-        ctx,
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Nodes(),
@@ -70,7 +71,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
         1*time.Second)
 
     epsmController := endpointslicemirroring.NewController(
-        ctx,
+        tCtx,
         informers.Core().V1().Endpoints(),
         informers.Discovery().V1().EndpointSlices(),
         informers.Core().V1().Services(),
@@ -79,11 +80,10 @@ func TestEndpointSliceMirroring(t *testing.T) {
         1*time.Second)
 
     // Start informer and controllers
-    defer cancel()
-    informers.Start(ctx.Done())
-    go epController.Run(ctx, 5)
-    go epsController.Run(ctx, 5)
-    go epsmController.Run(ctx, 5)
+    informers.Start(tCtx.Done())
+    go epController.Run(tCtx, 5)
+    go epsController.Run(tCtx, 5)
+    go epsmController.Run(tCtx, 5)
 
     testCases := []struct {
         testName string
@@ -259,7 +259,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
     if tc.service != nil {
         resourceName = tc.service.Name
         tc.service.Namespace = ns.Name
-        _, err = client.CoreV1().Services(ns.Name).Create(ctx, tc.service, metav1.CreateOptions{})
+        _, err = client.CoreV1().Services(ns.Name).Create(tCtx, tc.service, metav1.CreateOptions{})
         if err != nil {
             t.Fatalf("Error creating service: %v", err)
         }
@@ -268,7 +268,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
     if tc.customEndpoints != nil {
         resourceName = tc.customEndpoints.Name
         tc.customEndpoints.Namespace = ns.Name
-        _, err = client.CoreV1().Endpoints(ns.Name).Create(ctx, tc.customEndpoints, metav1.CreateOptions{})
+        _, err = client.CoreV1().Endpoints(ns.Name).Create(tCtx, tc.customEndpoints, metav1.CreateOptions{})
         if err != nil {
             t.Fatalf("Error creating endpoints: %v", err)
         }
@@ -276,7 +276,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
 
     err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
         lSelector := discovery.LabelServiceName + "=" + resourceName
-        esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector})
+        esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(tCtx, metav1.ListOptions{LabelSelector: lSelector})
         if err != nil {
             t.Logf("Error listing EndpointSlices: %v", err)
             return false, err
@@ -312,7 +312,6 @@ func TestEndpointSliceMirroring(t *testing.T) {
 }
 
 func TestEndpointSliceMirroringUpdates(t *testing.T) {
-    _, ctx := ktesting.NewTestContext(t)
     // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
     server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
     defer server.TearDownFn()
@@ -325,8 +324,9 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
     resyncPeriod := 12 * time.Hour
     informers := informers.NewSharedInformerFactory(client, resyncPeriod)
 
+    tCtx := ktesting.Init(t)
     epsmController := endpointslicemirroring.NewController(
-        ctx,
+        tCtx,
         informers.Core().V1().Endpoints(),
         informers.Discovery().V1().EndpointSlices(),
         informers.Core().V1().Services(),
@@ -335,10 +335,8 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
         1*time.Second)
 
     // Start informer and controllers
-    ctx, cancel := context.WithCancel(ctx)
-    defer cancel()
-    informers.Start(ctx.Done())
-    go epsmController.Run(ctx, 1)
+    informers.Start(tCtx.Done())
+    go epsmController.Run(tCtx, 1)
 
     testCases := []struct {
         testName string
@@ -405,19 +403,19 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
         }},
     }
 
-    _, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{})
+    _, err = client.CoreV1().Services(ns.Name).Create(tCtx, service, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Error creating service: %v", err)
     }
 
-    _, err = client.CoreV1().Endpoints(ns.Name).Create(ctx, customEndpoints, metav1.CreateOptions{})
+    _, err = client.CoreV1().Endpoints(ns.Name).Create(tCtx, customEndpoints, metav1.CreateOptions{})
     if err != nil {
         t.Fatalf("Error creating endpoints: %v", err)
     }
 
     // update endpoint
     tc.tweakEndpoint(customEndpoints)
-    _, err = client.CoreV1().Endpoints(ns.Name).Update(ctx, customEndpoints, metav1.UpdateOptions{})
+    _, err = client.CoreV1().Endpoints(ns.Name).Update(tCtx, customEndpoints, metav1.UpdateOptions{})
     if err != nil {
         t.Fatalf("Error updating endpoints: %v", err)
     }
@@ -425,7 +423,7 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
     // verify the endpoint updates were mirrored
     err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
         lSelector := discovery.LabelServiceName + "=" + service.Name
-        esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector})
+        esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(tCtx, metav1.ListOptions{LabelSelector: lSelector})
         if err != nil {
             t.Logf("Error listing EndpointSlices: %v", err)
             return false, err
@@ -489,7 +487,6 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
 }
 
 func TestEndpointSliceMirroringSelectorTransition(t *testing.T) {
-    _, ctx := ktesting.NewTestContext(t)
     // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
     server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
     defer server.TearDownFn()
@@ -502,8 +499,9 @@ func TestEndpointSliceMirroringSelectorTransition(t *testing.T) {
     resyncPeriod := 12 * time.Hour
     informers := informers.NewSharedInformerFactory(client, resyncPeriod)
 
+    tCtx := ktesting.Init(t)
     epsmController := endpointslicemirroring.NewController(
-        ctx,
+        tCtx,
         informers.Core().V1().Endpoints(),
         informers.Discovery().V1().EndpointSlices(),
         informers.Core().V1().Services(),
@@ -512,10 +510,8 @@ func TestEndpointSliceMirroringSelectorTransition(t *testing.T) {
         1*time.Second)
 
     // Start informer and controllers
-    ctx, cancel := context.WithCancel(ctx)
-    defer cancel()
-    informers.Start(ctx.Done())
-    go epsmController.Run(ctx, 1)
+    informers.Start(tCtx.Done())
+    go epsmController.Run(tCtx, 1)
 
     testCases := []struct {
         testName string
@@ -29,10 +29,10 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/klog/v2/ktesting"
     kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
     "k8s.io/kubernetes/pkg/controller/endpointslice"
     "k8s.io/kubernetes/test/integration/framework"
+    "k8s.io/kubernetes/test/utils/ktesting"
     utilpointer "k8s.io/utils/pointer"
 )
 
@@ -116,9 +116,9 @@ func TestEndpointSliceTerminating(t *testing.T) {
     resyncPeriod := 12 * time.Hour
     informers := informers.NewSharedInformerFactory(client, resyncPeriod)
 
-    _, ctx := ktesting.NewTestContext(t)
+    tCtx := ktesting.Init(t)
     epsController := endpointslice.NewController(
-        ctx,
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().Services(),
         informers.Core().V1().Nodes(),
@@ -128,10 +128,8 @@ func TestEndpointSliceTerminating(t *testing.T) {
         1*time.Second)
 
     // Start informer and controllers
-    ctx, cancel := context.WithCancel(ctx)
-    defer cancel()
-    informers.Start(ctx.Done())
-    go epsController.Run(ctx, 1)
+    informers.Start(tCtx.Done())
+    go epsController.Run(tCtx, 1)
 
     // Create namespace
     ns := framework.CreateNamespaceOrDie(client, "test-endpoints-terminating", t)
@@ -47,11 +47,11 @@ import (
     "k8s.io/client-go/tools/cache"
     "k8s.io/controller-manager/pkg/informerfactory"
     "k8s.io/klog/v2"
-    "k8s.io/klog/v2/ktesting"
     kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
     "k8s.io/kubernetes/pkg/controller/garbagecollector"
     "k8s.io/kubernetes/test/integration"
     "k8s.io/kubernetes/test/integration/framework"
+    "k8s.io/kubernetes/test/utils/ktesting"
     "k8s.io/utils/ptr"
 )
 
@@ -247,9 +247,13 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
     }
     sharedInformers := informers.NewSharedInformerFactory(clientSet, 0)
     metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
+
+    logger, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
     alwaysStarted := make(chan struct{})
     close(alwaysStarted)
     gc, err := garbagecollector.NewGarbageCollector(
+        ctx,
         clientSet,
         metadataClient,
         restMapper,
@@ -261,8 +265,6 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
         t.Fatalf("failed to create garbage collector: %v", err)
     }
 
-    logger, ctx := ktesting.NewTestContext(t)
-    ctx, cancel := context.WithCancel(ctx)
     tearDown := func() {
         cancel()
         result.TearDownFn()
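One detail worth noting in the garbage collector hunks above is ordering: the test context and its cancel func are now created before garbagecollector.NewGarbageCollector, because the constructor takes the context as its first listed argument, while the existing teardown closure keeps using the same cancel. A self-contained Go sketch of that ordering, with a hypothetical newComponent constructor standing in for the real collector (illustration only):

    package example

    import (
        "context"
        "testing"

        "k8s.io/kubernetes/test/utils/ktesting"
    )

    // newComponent is a hypothetical stand-in for a constructor that, like the
    // collector after this change, consumes the context at construction time.
    func newComponent(ctx context.Context) error { return nil }

    func TestOrderingSketch(t *testing.T) {
        // The context must exist before the constructor runs ...
        _, ctx := ktesting.NewTestContext(t)
        ctx, cancel := context.WithCancel(ctx)

        if err := newComponent(ctx); err != nil {
            t.Fatalf("failed to create component: %v", err)
        }

        // ... and the same cancel is reused by the teardown closure.
        tearDown := func() {
            cancel()
        }
        defer tearDown()
    }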
@@ -37,13 +37,13 @@ import (
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
     watchtools "k8s.io/client-go/tools/watch"
-    "k8s.io/klog/v2/ktesting"
     "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
     "k8s.io/kubernetes/pkg/controller"
     replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
     resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
     quotainstall "k8s.io/kubernetes/pkg/quota/v1/install"
     "k8s.io/kubernetes/test/integration/framework"
+    "k8s.io/kubernetes/test/utils/ktesting"
 )
 
 const (
@@ -60,7 +60,7 @@ const (
 // quota_test.go:100: Took 4.196205966s to scale up without quota
 // quota_test.go:115: Took 12.021640372s to scale up with quota
 func TestQuota(t *testing.T) {
-    logger, ctx := ktesting.NewTestContext(t)
+    _, ctx := ktesting.NewTestContext(t)
     ctx, cancel := context.WithCancel(ctx)
     defer cancel()
 
@@ -82,7 +82,7 @@ func TestQuota(t *testing.T) {
 
     informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
     rm := replicationcontroller.NewReplicationManager(
-        logger,
+        ctx,
         informers.Core().V1().Pods(),
         informers.Core().V1().ReplicationControllers(),
         clientset,
@@ -291,12 +291,10 @@ plugins:
         t.Fatal(err)
     }
 
-    logger, ctx := ktesting.NewTestContext(t)
-    ctx, cancel := context.WithCancel(ctx)
-    defer cancel()
+    tCtx := ktesting.Init(t)
 
     // Set up an API server
-    _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
+    _, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
         ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
             // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
             opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -313,13 +311,13 @@ plugins:
 
     informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
     rm := replicationcontroller.NewReplicationManager(
-        logger,
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().ReplicationControllers(),
         clientset,
         replicationcontroller.BurstReplicas,
     )
-    go rm.Run(ctx, 3)
+    go rm.Run(tCtx, 3)
 
     discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
     listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -336,16 +334,16 @@ plugins:
         InformersStarted: informersStarted,
         Registry: generic.NewRegistry(qc.Evaluators()),
     }
-    resourceQuotaController, err := resourcequotacontroller.NewController(ctx, resourceQuotaControllerOptions)
+    resourceQuotaController, err := resourcequotacontroller.NewController(tCtx, resourceQuotaControllerOptions)
     if err != nil {
         t.Fatalf("unexpected err: %v", err)
     }
-    go resourceQuotaController.Run(ctx, 2)
+    go resourceQuotaController.Run(tCtx, 2)
 
     // Periodically the quota controller to detect new resource types
-    go resourceQuotaController.Sync(ctx, discoveryFunc, 30*time.Second)
+    go resourceQuotaController.Sync(tCtx, discoveryFunc, 30*time.Second)
 
-    informers.Start(ctx.Done())
+    informers.Start(tCtx.Done())
     close(informersStarted)
 
     // try to create a pod
@@ -363,7 +361,7 @@ plugins:
             },
         },
     }
-    if _, err := clientset.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}); err == nil {
+    if _, err := clientset.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err == nil {
        t.Fatalf("expected error for insufficient quota")
     }
 
@@ -386,7 +384,7 @@ plugins:
     // attempt to create a new pod once the quota is propagated
     err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
         // retry until we succeed (to allow time for all changes to propagate)
-        if _, err := clientset.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}); err == nil {
+        if _, err := clientset.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err == nil {
             return true, nil
         }
         return false, nil
@@ -419,12 +417,10 @@ plugins:
         t.Fatal(err)
     }
 
-    logger, ctx := ktesting.NewTestContext(t)
-    ctx, cancel := context.WithCancel(ctx)
-    defer cancel()
+    tCtx := ktesting.Init(t)
 
     // Set up an API server
-    _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
+    _, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
         ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
             // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
             opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -441,13 +437,13 @@ plugins:
 
     informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
     rm := replicationcontroller.NewReplicationManager(
-        logger,
+        tCtx,
         informers.Core().V1().Pods(),
         informers.Core().V1().ReplicationControllers(),
         clientset,
         replicationcontroller.BurstReplicas,
     )
-    go rm.Run(ctx, 3)
+    go rm.Run(tCtx, 3)
 
     discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
     listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -464,16 +460,16 @@ plugins:
         InformersStarted: informersStarted,
         Registry: generic.NewRegistry(qc.Evaluators()),
     }
-    resourceQuotaController, err := resourcequotacontroller.NewController(ctx, resourceQuotaControllerOptions)
+    resourceQuotaController, err := resourcequotacontroller.NewController(tCtx, resourceQuotaControllerOptions)
     if err != nil {
         t.Fatalf("unexpected err: %v", err)
     }
-    go resourceQuotaController.Run(ctx, 2)
+    go resourceQuotaController.Run(tCtx, 2)
 
     // Periodically the quota controller to detect new resource types
-    go resourceQuotaController.Sync(ctx, discoveryFunc, 30*time.Second)
+    go resourceQuotaController.Sync(tCtx, discoveryFunc, 30*time.Second)
 
-    informers.Start(ctx.Done())
+    informers.Start(tCtx.Done())
     close(informersStarted)
 
     // now create a covering quota
@@ -496,14 +492,14 @@ plugins:
 
     // Creating the first node port service should succeed
     nodePortService := newService("np-svc", v1.ServiceTypeNodePort, true)
-    _, err = clientset.CoreV1().Services(ns.Name).Create(ctx, nodePortService, metav1.CreateOptions{})
+    _, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, nodePortService, metav1.CreateOptions{})
     if err != nil {
         t.Errorf("creating first node port Service should not have returned error: %v", err)
     }
 
     // Creating the first loadbalancer service should succeed
     lbServiceWithNodePort1 := newService("lb-svc-withnp1", v1.ServiceTypeLoadBalancer, true)
-    _, err = clientset.CoreV1().Services(ns.Name).Create(ctx, lbServiceWithNodePort1, metav1.CreateOptions{})
+    _, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, lbServiceWithNodePort1, metav1.CreateOptions{})
     if err != nil {
         t.Errorf("creating first loadbalancer Service should not have returned error: %v", err)
     }
@@ -522,7 +518,7 @@ plugins:
 
     // Creating a loadbalancer Service without node ports should succeed
     lbServiceWithoutNodePort1 := newService("lb-svc-wonp1", v1.ServiceTypeLoadBalancer, false)
-    _, err = clientset.CoreV1().Services(ns.Name).Create(ctx, lbServiceWithoutNodePort1, metav1.CreateOptions{})
+    _, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, lbServiceWithoutNodePort1, metav1.CreateOptions{})
     if err != nil {
         t.Errorf("creating another loadbalancer Service without node ports should not have returned error: %v", err)
     }
@@ -541,7 +537,7 @@ plugins:
 
     // Creating a ClusterIP Service should succeed
     clusterIPService1 := newService("clusterip-svc1", v1.ServiceTypeClusterIP, false)
-    _, err = clientset.CoreV1().Services(ns.Name).Create(ctx, clusterIPService1, metav1.CreateOptions{})
+    _, err = clientset.CoreV1().Services(ns.Name).Create(tCtx, clusterIPService1, metav1.CreateOptions{})
     if err != nil {
         t.Errorf("creating a cluster IP Service should not have returned error: %v", err)
     }
@@ -40,7 +40,6 @@ import (
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/util/retry"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
-    "k8s.io/klog/v2/ktesting"
     kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/apis/core"
@@ -48,6 +47,7 @@ import (
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/test/integration/framework"
     testutil "k8s.io/kubernetes/test/utils"
+    "k8s.io/kubernetes/test/utils/ktesting"
     "k8s.io/utils/ptr"
 )
 
@@ -118,7 +118,8 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
     }
 }
 
-func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) {
+func rmSetup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) {
+    tCtx := ktesting.Init(t)
     // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
     server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
 
@@ -129,17 +130,21 @@ func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replicaset.Repli
     }
     resyncPeriod := 12 * time.Hour
     informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "rs-informers")), resyncPeriod)
-    logger, _ := ktesting.NewTestContext(t)
 
     rm := replicaset.NewReplicaSetController(
-        logger,
+        tCtx,
         informers.Apps().V1().ReplicaSets(),
         informers.Core().V1().Pods(),
         clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replicaset-controller")),
         replicaset.BurstReplicas,
     )
 
-    return server.TearDownFn, rm, informers, clientSet
+    newTeardown := func() {
+        tCtx.Cancel("tearing down controller")
+        server.TearDownFn()
+    }
+
+    return tCtx, newTeardown, rm, informers, clientSet
 }
 
 func rmSimpleSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, clientset.Interface) {
@@ -426,22 +431,23 @@ func TestAdoption(t *testing.T) {
|
|||||||
}
|
}
|
||||||
for i, tc := range testCases {
|
for i, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
closeFn, rm, informers, clientSet := rmSetup(t)
|
tCtx, closeFn, rm, informers, clientSet := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
|
|
||||||
ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("rs-adoption-%d", i), t)
|
ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("rs-adoption-%d", i), t)
|
||||||
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
|
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
|
||||||
|
|
||||||
rsClient := clientSet.AppsV1().ReplicaSets(ns.Name)
|
rsClient := clientSet.AppsV1().ReplicaSets(ns.Name)
|
||||||
podClient := clientSet.CoreV1().Pods(ns.Name)
|
podClient := clientSet.CoreV1().Pods(ns.Name)
|
||||||
const rsName = "rs"
|
const rsName = "rs"
|
||||||
rs, err := rsClient.Create(context.TODO(), newRS(rsName, ns.Name, 1), metav1.CreateOptions{})
|
rs, err := rsClient.Create(tCtx, newRS(rsName, ns.Name, 1), metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create replica set: %v", err)
|
t.Fatalf("Failed to create replica set: %v", err)
|
||||||
}
|
}
|
||||||
podName := fmt.Sprintf("pod%d", i)
|
podName := fmt.Sprintf("pod%d", i)
|
||||||
pod := newMatchingPod(podName, ns.Name)
|
pod := newMatchingPod(podName, ns.Name)
|
||||||
pod.OwnerReferences = tc.existingOwnerReferences(rs)
|
pod.OwnerReferences = tc.existingOwnerReferences(rs)
|
||||||
_, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
|
_, err = podClient.Create(tCtx, pod, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to create Pod: %v", err)
|
t.Fatalf("Failed to create Pod: %v", err)
|
||||||
}
|
}
|
||||||
@@ -449,7 +455,7 @@ func TestAdoption(t *testing.T) {
|
|||||||
stopControllers := runControllerAndInformers(t, rm, informers, 1)
|
stopControllers := runControllerAndInformers(t, rm, informers, 1)
|
||||||
defer stopControllers()
|
defer stopControllers()
|
||||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||||
updatedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
updatedPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -497,7 +503,7 @@ func TestRSSelectorImmutability(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSpecReplicasChange(t *testing.T) {
|
func TestSpecReplicasChange(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
tCtx, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -528,7 +534,7 @@ func TestSpecReplicasChange(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||||
newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
|
newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -539,8 +545,9 @@ func TestSpecReplicasChange(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestDeletingAndFailedPods(t *testing.T) {
|
func TestDeletingAndFailedPods(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
tCtx, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
|
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
stopControllers := runControllerAndInformers(t, rm, informers, 0)
|
stopControllers := runControllerAndInformers(t, rm, informers, 0)
|
||||||
@@ -564,7 +571,7 @@ func TestDeletingAndFailedPods(t *testing.T) {
|
|||||||
updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
|
updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
|
||||||
pod.Finalizers = []string{"fake.example.com/blockDeletion"}
|
pod.Finalizers = []string{"fake.example.com/blockDeletion"}
|
||||||
})
|
})
|
||||||
if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, metav1.DeleteOptions{}); err != nil {
|
if err := c.CoreV1().Pods(ns.Name).Delete(tCtx, deletingPod.Name, metav1.DeleteOptions{}); err != nil {
|
||||||
t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err)
|
t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -642,7 +649,7 @@ func TestPodDeletionCost(t *testing.T) {
|
|||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDeletionCost, tc.enabled)()
|
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDeletionCost, tc.enabled)()
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
_, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, tc.name, t)
|
ns := framework.CreateNamespaceOrDie(c, tc.name, t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -701,7 +708,7 @@ func TestPodDeletionCost(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestOverlappingRSs(t *testing.T) {
|
func TestOverlappingRSs(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
tCtx, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-overlapping-rss", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-overlapping-rss", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -725,7 +732,7 @@ func TestOverlappingRSs(t *testing.T) {
|
|||||||
|
|
||||||
// Expect both RSs have .status.replicas = .spec.replicas
|
// Expect both RSs have .status.replicas = .spec.replicas
|
||||||
for i := 0; i < 2; i++ {
|
for i := 0; i < 2; i++ {
|
||||||
newRS, err := c.AppsV1().ReplicaSets(ns.Name).Get(context.TODO(), fmt.Sprintf("rs-%d", i+1), metav1.GetOptions{})
|
newRS, err := c.AppsV1().ReplicaSets(ns.Name).Get(tCtx, fmt.Sprintf("rs-%d", i+1), metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to obtain rs rs-%d: %v", i+1, err)
|
t.Fatalf("failed to obtain rs rs-%d: %v", i+1, err)
|
||||||
}
|
}
|
||||||
@@ -736,7 +743,7 @@ func TestOverlappingRSs(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
tCtx, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-pod-orphaning-and-adoption-when-labels-change", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-pod-orphaning-and-adoption-when-labels-change", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -765,7 +772,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
|||||||
pod.Labels = newLabelMap
|
pod.Labels = newLabelMap
|
||||||
})
|
})
|
||||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||||
newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -780,7 +787,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
|||||||
pod.Labels = labelMap()
|
pod.Labels = labelMap()
|
||||||
})
|
})
|
||||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||||
newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If the pod is not found, it means the RS picks the pod for deletion (it is extra)
|
// If the pod is not found, it means the RS picks the pod for deletion (it is extra)
|
||||||
// Verify there is only one pod in namespace and it has ControllerRef to the RS
|
// Verify there is only one pod in namespace and it has ControllerRef to the RS
|
||||||
@@ -814,7 +821,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGeneralPodAdoption(t *testing.T) {
|
func TestGeneralPodAdoption(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
_, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-general-pod-adoption", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-general-pod-adoption", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -846,7 +853,7 @@ func TestGeneralPodAdoption(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReadyAndAvailableReplicas(t *testing.T) {
|
func TestReadyAndAvailableReplicas(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
tCtx, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-ready-and-available-replicas", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-ready-and-available-replicas", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -886,7 +893,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) {
|
|||||||
|
|
||||||
rsClient := c.AppsV1().ReplicaSets(ns.Name)
|
rsClient := c.AppsV1().ReplicaSets(ns.Name)
|
||||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||||
newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
|
newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -898,7 +905,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRSScaleSubresource(t *testing.T) {
|
func TestRSScaleSubresource(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
_, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-rs-scale-subresource", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-rs-scale-subresource", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -917,7 +924,7 @@ func TestRSScaleSubresource(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
|
func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
_, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-extra-pods-adoption-and-deletion", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-extra-pods-adoption-and-deletion", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -949,7 +956,7 @@ func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFullyLabeledReplicas(t *testing.T) {
|
func TestFullyLabeledReplicas(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
tCtx, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-fully-labeled-replicas", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-fully-labeled-replicas", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -981,7 +988,7 @@ func TestFullyLabeledReplicas(t *testing.T) {
|
|||||||
|
|
||||||
// Verify only one pod is fully labeled
|
// Verify only one pod is fully labeled
|
||||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||||
newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
|
newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -992,7 +999,7 @@ func TestFullyLabeledReplicas(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
|
func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
|
||||||
closeFn, rm, informers, c := rmSetup(t)
|
tCtx, closeFn, rm, informers, c := rmSetup(t)
|
||||||
defer closeFn()
|
defer closeFn()
|
||||||
ns := framework.CreateNamespaceOrDie(c, "test-default-gc-v1", t)
|
ns := framework.CreateNamespaceOrDie(c, "test-default-gc-v1", t)
|
||||||
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
defer framework.DeleteNamespaceOrDie(c, ns, t)
|
||||||
@@ -1014,14 +1021,14 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
rsClient := c.AppsV1().ReplicaSets(ns.Name)
|
rsClient := c.AppsV1().ReplicaSets(ns.Name)
|
||||||
err := rsClient.Delete(context.TODO(), rs.Name, metav1.DeleteOptions{})
|
err := rsClient.Delete(tCtx, rs.Name, metav1.DeleteOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to delete rs: %v", err)
|
t.Fatalf("Failed to delete rs: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify no new finalizer has been added
|
// Verify no new finalizer has been added
|
||||||
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||||
newRS, err := rsClient.Get(context.TODO(), rs.Name, metav1.GetOptions{})
|
newRS, err := rsClient.Get(tCtx, rs.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -1047,5 +1054,5 @@ func TestReplicaSetsAppsV1DefaultGCPolicy(t *testing.T) {
|
|||||||
rs.Finalizers = finalizers
|
rs.Finalizers = finalizers
|
||||||
})
|
})
|
||||||
|
|
||||||
rsClient.Delete(context.TODO(), rs.Name, metav1.DeleteOptions{})
|
_ = rsClient.Delete(tCtx, rs.Name, metav1.DeleteOptions{})
|
||||||
}
|
}
|
||||||
@@ -38,12 +38,12 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
featuregatetesting "k8s.io/component-base/featuregate/testing"
- "k8s.io/klog/v2/ktesting"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
+ "k8s.io/kubernetes/test/utils/ktesting"
)

const (
@@ -111,7 +111,8 @@ func newMatchingPod(podName, namespace string) *v1.Pod {
}
}

- func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
+ func rmSetup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
+ tCtx := ktesting.Init(t)
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())

@@ -123,16 +124,19 @@ func rmSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *replication.Repl
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "rc-informers")), resyncPeriod)

- logger, _ := ktesting.NewTestContext(t)
rm := replication.NewReplicationManager(
- logger,
+ tCtx,
informers.Core().V1().Pods(),
informers.Core().V1().ReplicationControllers(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replication-controller")),
replication.BurstReplicas,
)
+ newTeardown := func() {
+ tCtx.Cancel("tearing down controller")
+ server.TearDownFn()
+ }

- return server.TearDownFn, rm, informers, clientSet
+ return tCtx, newTeardown, rm, informers, clientSet
}

// Run RC controller and informers
@@ -414,7 +418,7 @@ func TestAdoption(t *testing.T) {
}
for i, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- closeFn, rm, informers, clientSet := rmSetup(t)
+ tCtx, closeFn, rm, informers, clientSet := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("rc-adoption-%d", i), t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
@@ -422,14 +426,14 @@ func TestAdoption(t *testing.T) {
rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
podClient := clientSet.CoreV1().Pods(ns.Name)
const rcName = "rc"
- rc, err := rcClient.Create(context.TODO(), newRC(rcName, ns.Name, 1), metav1.CreateOptions{})
+ rc, err := rcClient.Create(tCtx, newRC(rcName, ns.Name, 1), metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create replication controllers: %v", err)
}
podName := fmt.Sprintf("pod%d", i)
pod := newMatchingPod(podName, ns.Name)
pod.OwnerReferences = tc.existingOwnerReferences(rc)
- _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
+ _, err = podClient.Create(tCtx, pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
}
@@ -437,7 +441,7 @@ func TestAdoption(t *testing.T) {
stopControllers := runControllerAndInformers(t, rm, informers, 1)
defer stopControllers()
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
- updatedPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ updatedPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -457,7 +461,7 @@ func TestAdoption(t *testing.T) {
}

func TestSpecReplicasChange(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ tCtx, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -488,7 +492,7 @@ func TestSpecReplicasChange(t *testing.T) {
}

if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
- newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
+ newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -500,7 +504,7 @@ func TestSpecReplicasChange(t *testing.T) {

func TestLogarithmicScaleDown(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LogarithmicScaleDown, true)()
- closeFn, rm, informers, c := rmSetup(t)
+ tCtx, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -513,7 +517,7 @@ func TestLogarithmicScaleDown(t *testing.T) {
waitRCStable(t, c, rc)

// get list of pods in the cluster
- pods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), metav1.ListOptions{})
+ pods, err := c.CoreV1().Pods(ns.Name).List(tCtx, metav1.ListOptions{})
if err != nil {
t.Fatalf("failed to get pods in namespace %s: %+v", ns.Name, err)
}
@@ -526,7 +530,7 @@ func TestLogarithmicScaleDown(t *testing.T) {
// (meaning the 3rd one was deleted)
scaleRC(t, c, rc, 2)

- newPods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), metav1.ListOptions{})
+ newPods, err := c.CoreV1().Pods(ns.Name).List(tCtx, metav1.ListOptions{})
if err != nil {
t.Fatalf("failed to get pods in namespace %s: %+v", ns.Name, err)
}
@@ -537,7 +541,7 @@ func TestLogarithmicScaleDown(t *testing.T) {
}

func TestDeletingAndFailedPods(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ tCtx, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -562,7 +566,7 @@ func TestDeletingAndFailedPods(t *testing.T) {
updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
pod.Finalizers = []string{"fake.example.com/blockDeletion"}
})
- if err := c.CoreV1().Pods(ns.Name).Delete(context.TODO(), deletingPod.Name, metav1.DeleteOptions{}); err != nil {
+ if err := c.CoreV1().Pods(ns.Name).Delete(tCtx, deletingPod.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err)
}

@@ -602,7 +606,7 @@ func TestDeletingAndFailedPods(t *testing.T) {
}

func TestOverlappingRCs(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ tCtx, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-overlapping-rcs", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -626,7 +630,7 @@ func TestOverlappingRCs(t *testing.T) {

// Expect both RCs have .status.replicas = .spec.replicas
for i := 0; i < 2; i++ {
- newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(context.TODO(), fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{})
+ newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(tCtx, fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{})
if err != nil {
t.Fatalf("failed to obtain rc rc-%d: %v", i+1, err)
}
@@ -637,7 +641,7 @@ func TestOverlappingRCs(t *testing.T) {
}

func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ tCtx, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-pod-orphaning-and-adoption-when-labels-change", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -666,7 +670,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
pod.Labels = newLabelMap
})
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
- newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -681,7 +685,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
pod.Labels = labelMap()
})
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
- newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
if err != nil {
// If the pod is not found, it means the RC picks the pod for deletion (it is extra)
// Verify there is only one pod in namespace and it has ControllerRef to the RC
@@ -715,7 +719,7 @@ func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
}

func TestGeneralPodAdoption(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ _, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-general-pod-adoption", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -747,7 +751,7 @@ func TestGeneralPodAdoption(t *testing.T) {
}

func TestReadyAndAvailableReplicas(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ tCtx, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-ready-and-available-replicas", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -787,7 +791,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) {

rcClient := c.CoreV1().ReplicationControllers(ns.Name)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
- newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
+ newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -799,7 +803,7 @@ func TestReadyAndAvailableReplicas(t *testing.T) {
}

func TestRCScaleSubresource(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ _, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-rc-scale-subresource", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -818,7 +822,7 @@ func TestRCScaleSubresource(t *testing.T) {
}

func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ _, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-extra-pods-adoption-and-deletion", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -850,7 +854,7 @@ func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
}

func TestFullyLabeledReplicas(t *testing.T) {
- closeFn, rm, informers, c := rmSetup(t)
+ tCtx, closeFn, rm, informers, c := rmSetup(t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-fully-labeled-replicas", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
@@ -882,7 +886,7 @@ func TestFullyLabeledReplicas(t *testing.T) {

// Verify only one pod is fully labeled
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
- newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
+ newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -19,7 +19,6 @@ package taint
// This file tests the Taint feature.

import (
- "context"
"fmt"
"testing"
"time"
@@ -85,7 +84,7 @@ func TestTaintNodeByCondition(t *testing.T) {

// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
- context.TODO(),
+ testCtx.Ctx,
externalInformers.Coordination().V1().Leases(),
externalInformers.Core().V1().Pods(),
externalInformers.Core().V1().Nodes(),
@@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
+ "k8s.io/kubernetes/test/utils/ktesting"
)

// TestMigrateServiceCIDR validates the steps necessary to migrate a cluster default ServiceCIDR
@@ -49,8 +50,7 @@ import (
// 7. delete the kubernetes.default service, the new apiserver will recreate it within the new ServiceCIDR
func TestMigrateServiceCIDR(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)()
- ctx, cancelFn := context.WithCancel(context.Background())
+ tCtx := ktesting.Init(t)
- defer cancelFn()

cidr1 := "192.168.0.0/29"
cidr2 := "10.168.0.0/24"
@@ -78,11 +78,12 @@ func TestMigrateServiceCIDR(t *testing.T) {
informers1 := informers.NewSharedInformerFactory(client1, resyncPeriod)
// ServiceCIDR controller
go servicecidrs.NewController(
+ tCtx,
informers1.Networking().V1alpha1().ServiceCIDRs(),
informers1.Networking().V1alpha1().IPAddresses(),
client1,
- ).Run(ctx, 5)
+ ).Run(tCtx, 5)
- informers1.Start(ctx.Done())
+ informers1.Start(tCtx.Done())

// the default serviceCIDR should have a finalizer and ready condition set to true
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
@@ -203,19 +204,20 @@ func TestMigrateServiceCIDR(t *testing.T) {
defer framework.DeleteNamespaceOrDie(client2, ns, t)

// switch the controller to the new apiserver
- cancelFn()
+ tCtx.Cancel("tearing down ServiceCIDR controller 1")
s1.TearDownFn()

// ServiceCIDR controller
- ctx2, cancelFn2 := context.WithCancel(context.Background())
+ tCtx2 := ktesting.Init(t)
- defer cancelFn2()
+ defer tCtx.Cancel("tearing down ServiceCIDR controller 2")
informers2 := informers.NewSharedInformerFactory(client2, resyncPeriod)
go servicecidrs.NewController(
+ tCtx2,
informers2.Networking().V1alpha1().ServiceCIDRs(),
informers2.Networking().V1alpha1().IPAddresses(),
client2,
- ).Run(ctx2, 5)
+ ).Run(tCtx2, 5)
- informers2.Start(ctx2.Done())
+ informers2.Start(tCtx2.Done())

// delete the kubernetes.default service so the old DefaultServiceCIDR can be deleted
// and the new apiserver can take over
@@ -65,6 +65,7 @@ func TestServiceAllocNewServiceCIDR(t *testing.T) {
resyncPeriod := 12 * time.Hour
informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
go servicecidrs.NewController(
+ ctx,
informerFactory.Networking().V1alpha1().ServiceCIDRs(),
informerFactory.Networking().V1alpha1().IPAddresses(),
client,
@@ -165,6 +166,7 @@ func TestServiceCIDRDeletion(t *testing.T) {
resyncPeriod := 12 * time.Hour
informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
go servicecidrs.NewController(
+ ctx,
informerFactory.Networking().V1alpha1().ServiceCIDRs(),
informerFactory.Networking().V1alpha1().IPAddresses(),
client,
@@ -37,13 +37,13 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
featuregatetesting "k8s.io/component-base/featuregate/testing"
- "k8s.io/klog/v2/ktesting"
apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/statefulset"
"k8s.io/kubernetes/pkg/controlplane"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
+ "k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/ptr"
)

@@ -126,12 +126,11 @@ func TestVolumeTemplateNoopUpdate(t *testing.T) {
}

func TestSpecReplicasChange(t *testing.T) {
- _, ctx := ktesting.NewTestContext(t)
+ tCtx, closeFn, rm, informers, c := scSetup(t)
- closeFn, rm, informers, c := scSetup(ctx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
- cancel := runControllerAndInformers(rm, informers)
+ cancel := runControllerAndInformers(tCtx, rm, informers)
defer cancel()

createHeadlessService(t, c, newHeadlessService(ns.Name))
@@ -170,12 +169,11 @@ func TestSpecReplicasChange(t *testing.T) {
}

func TestDeletingAndTerminatingPods(t *testing.T) {
- _, ctx := ktesting.NewTestContext(t)
+ tCtx, closeFn, rm, informers, c := scSetup(t)
- closeFn, rm, informers, c := scSetup(ctx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
- cancel := runControllerAndInformers(rm, informers)
+ cancel := runControllerAndInformers(tCtx, rm, informers)
defer cancel()

podCount := 3
@@ -289,12 +287,11 @@ func TestStatefulSetAvailable(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- _, ctx := ktesting.NewTestContext(t)
+ tCtx, closeFn, rm, informers, c := scSetup(t)
- closeFn, rm, informers, c := scSetup(ctx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-available-pods", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
- cancel := runControllerAndInformers(rm, informers)
+ cancel := runControllerAndInformers(tCtx, rm, informers)
defer cancel()

labelMap := labelMap()
@@ -380,12 +377,9 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1

// add for issue: https://github.com/kubernetes/kubernetes/issues/108837
func TestStatefulSetStatusWithPodFail(t *testing.T) {
- _, ctx := ktesting.NewTestContext(t)
+ tCtx := ktesting.Init(t)
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()

limitedPodNumber := 2
- c, config, closeFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
+ c, config, closeFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerConfig: func(config *controlplane.Config) {
config.GenericConfig.AdmissionControl = &fakePodFailAdmission{
limitedPodNumber: limitedPodNumber,
@@ -393,11 +387,11 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) {
},
})
defer closeFn()
+ defer tCtx.Cancel("test has completed")
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod)
ssc := statefulset.NewStatefulSetController(
- ctx,
+ tCtx,
informers.Core().V1().Pods(),
informers.Apps().V1().StatefulSets(),
informers.Core().V1().PersistentVolumeClaims(),
@@ -408,11 +402,11 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) {
ns := framework.CreateNamespaceOrDie(c, "test-pod-fail", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)

- informers.Start(ctx.Done())
+ informers.Start(tCtx.Done())
- go ssc.Run(ctx, 5)
+ go ssc.Run(tCtx, 5)

sts := newSTS("sts", ns.Name, 4)
- _, err := c.AppsV1().StatefulSets(sts.Namespace).Create(ctx, sts, metav1.CreateOptions{})
+ _, err := c.AppsV1().StatefulSets(sts.Namespace).Create(tCtx, sts, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Could not create statefulSet %s: %v", sts.Name, err)
}
@@ -420,7 +414,7 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) {
wantReplicas := limitedPodNumber
var gotReplicas int32
if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(ctx, sts.Name, metav1.GetOptions{})
+ newSTS, err := c.AppsV1().StatefulSets(sts.Namespace).Get(tCtx, sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -477,10 +471,10 @@ func TestAutodeleteOwnerRefs(t *testing.T) {
}

defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)()
- _, ctx := ktesting.NewTestContext(t)
- closeFn, rm, informers, c := scSetup(ctx, t)
+ tCtx, closeFn, rm, informers, c := scSetup(t)
defer closeFn()
- cancel := runControllerAndInformers(rm, informers)
+ cancel := runControllerAndInformers(tCtx, rm, informers)
defer cancel()

for _, test := range tests {
@@ -521,12 +515,11 @@ func TestAutodeleteOwnerRefs(t *testing.T) {
}

func TestDeletingPodForRollingUpdatePartition(t *testing.T) {
- _, ctx := ktesting.NewTestContext(t)
+ tCtx, closeFn, rm, informers, c := scSetup(t)
- closeFn, rm, informers, c := scSetup(ctx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-deleting-pod-for-rolling-update-partition", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
- cancel := runControllerAndInformers(rm, informers)
+ cancel := runControllerAndInformers(tCtx, rm, informers)
defer cancel()

labelMap := labelMap()
@@ -570,7 +563,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) {
})

// Await for the pod-1 to be recreated, while pod-0 remains running
- if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
+ if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
@@ -599,7 +592,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) {
}

// Await for pod-0 to be not ready
- if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
+ if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
@@ -615,7 +608,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) {
})

// Await for pod-0 to be recreated and make it running
- if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
+ if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
pods := getPods(t, podClient, labelMap)
recreatedPods := v1.PodList{}
for _, pod := range pods.Items {
@@ -630,7 +623,7 @@ func TestDeletingPodForRollingUpdatePartition(t *testing.T) {
}

// Await for all stateful set status to record all replicas as ready
- if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
+ if err := wait.PollUntilContextTimeout(tCtx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
@@ -702,10 +695,9 @@ func TestStatefulSetStartOrdinal(t *testing.T) {
}

defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetStartOrdinal, true)()
- _, ctx := ktesting.NewTestContext(t)
+ tCtx, closeFn, rm, informers, c := scSetup(t)
- closeFn, rm, informers, c := scSetup(ctx, t)
defer closeFn()
- cancel := runControllerAndInformers(rm, informers)
+ cancel := runControllerAndInformers(tCtx, rm, informers)
defer cancel()

for _, test := range tests {
@@ -19,6 +19,7 @@ package statefulset
import (
"context"
"fmt"
+ "k8s.io/kubernetes/test/utils/ktesting"
"sync"
"testing"
"time"
@@ -161,7 +162,8 @@ func newStatefulSetPVC(name string) v1.PersistentVolumeClaim {
}

// scSetup sets up necessities for Statefulset integration test, including control plane, apiserver, informers, and clientset
- func scSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFunc, *statefulset.StatefulSetController, informers.SharedInformerFactory, clientset.Interface) {
+ func scSetup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *statefulset.StatefulSetController, informers.SharedInformerFactory, clientset.Interface) {
+ tCtx := ktesting.Init(t)
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())

@@ -174,7 +176,7 @@ func scSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFu
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod)

sc := statefulset.NewStatefulSetController(
- ctx,
+ tCtx,
informers.Core().V1().Pods(),
informers.Apps().V1().StatefulSets(),
informers.Core().V1().PersistentVolumeClaims(),
@@ -182,12 +184,16 @@ func scSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFu
clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-controller")),
)

- return server.TearDownFn, sc, informers, clientSet
+ teardown := func() {
+ tCtx.Cancel("tearing down controller")
+ server.TearDownFn()
+ }
+ return tCtx, teardown, sc, informers, clientSet
}

// Run STS controller and informers
- func runControllerAndInformers(sc *statefulset.StatefulSetController, informers informers.SharedInformerFactory) context.CancelFunc {
+ func runControllerAndInformers(ctx context.Context, sc *statefulset.StatefulSetController, informers informers.SharedInformerFactory) context.CancelFunc {
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(ctx)
informers.Start(ctx.Done())
go sc.Run(ctx, 5)
return cancel
|
@@ -203,6 +203,7 @@ func CreateGCController(ctx context.Context, tb ktesting.TB, restConfig restclie
 	alwaysStarted := make(chan struct{})
 	close(alwaysStarted)
 	gc, err := garbagecollector.NewGarbageCollector(
+		ctx,
 		clientSet,
 		metadataClient,
 		restMapper,
@@ -660,7 +661,6 @@ func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond
 // InitDisruptionController initializes and runs a Disruption Controller to properly
 // update PodDisuptionBudget objects.
 func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.DisruptionController {
-	_, ctx := ktesting.NewTestContext(t)
 	informers := informers.NewSharedInformerFactory(testCtx.ClientSet, 12*time.Hour)
 
 	discoveryClient := cacheddiscovery.NewMemCacheClient(testCtx.ClientSet.Discovery())
@@ -674,7 +674,7 @@ func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.Di
 	}
 
 	dc := disruption.NewDisruptionController(
-		ctx,
+		testCtx.Ctx,
 		informers.Core().V1().Pods(),
 		informers.Policy().V1().PodDisruptionBudgets(),
 		informers.Core().V1().ReplicationControllers(),
@@ -31,7 +31,6 @@ import (
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"
 	fakecloud "k8s.io/cloud-provider/fake"
-	"k8s.io/klog/v2/ktesting"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach"
 	volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -41,6 +40,7 @@ import (
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/integration/framework"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )
 
 func fakePodWithVol(namespace string) *v1.Pod {
@@ -156,36 +156,35 @@ func TestPodDeletionWithDswp(t *testing.T) {
 		},
 	}
 
-	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig)
 
 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t)
 	defer framework.DeleteNamespaceOrDie(testClient, ns, t)
 
 	pod := fakePodWithVol(namespaceName)
 
-	if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
+	if _, err := testClient.CoreV1().Nodes().Create(tCtx, node, metav1.CreateOptions{}); err != nil {
 		t.Fatalf("Failed to created node : %v", err)
 	}
 
 	// start controller loop
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go informers.Core().V1().Nodes().Informer().Run(ctx.Done())
-	if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+	go informers.Core().V1().Nodes().Informer().Run(tCtx.Done())
+	if _, err := testClient.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err != nil {
 		t.Errorf("Failed to create pod : %v", err)
 	}
 
 	podInformer := informers.Core().V1().Pods().Informer()
-	go podInformer.Run(ctx.Done())
+	go podInformer.Run(tCtx.Done())
 
-	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done())
-	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
-	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
-	initCSIObjects(ctx.Done(), informers)
-	go ctrl.Run(ctx)
+	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done())
+	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done())
+	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done())
+	initCSIObjects(tCtx.Done(), informers)
+	go ctrl.Run(tCtx)
 	// Run pvCtrl to avoid leaking goroutines started during its creation.
-	go pvCtrl.Run(ctx)
+	go pvCtrl.Run(tCtx)
 
 	waitToObservePods(t, podInformer, 1)
 	podKey, err := cache.MetaNamespaceKeyFunc(pod)
@@ -231,7 +230,9 @@ func TestPodUpdateWithWithADC(t *testing.T) {
 		},
 	}
 
-	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig)
 
 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t)
 	defer framework.DeleteNamespaceOrDie(testClient, ns, t)
@@ -254,16 +255,13 @@ func TestPodUpdateWithWithADC(t *testing.T) {
 	go podInformer.Run(podStopCh)
 
 	// start controller loop
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done())
-	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
-	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
-	initCSIObjects(ctx.Done(), informers)
-	go ctrl.Run(ctx)
+	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done())
+	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done())
+	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done())
+	initCSIObjects(tCtx.Done(), informers)
+	go ctrl.Run(tCtx)
 	// Run pvCtrl to avoid leaking goroutines started during its creation.
-	go pvCtrl.Run(ctx)
+	go pvCtrl.Run(tCtx)
 
 	waitToObservePods(t, podInformer, 1)
 	podKey, err := cache.MetaNamespaceKeyFunc(pod)
@@ -304,7 +302,9 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
 		},
 	}
 
-	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig)
 
 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t)
 	defer framework.DeleteNamespaceOrDie(testClient, ns, t)
@@ -327,16 +327,13 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
 	go podInformer.Run(podStopCh)
 
 	// start controller loop
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done())
-	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
-	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
-	initCSIObjects(ctx.Done(), informers)
-	go ctrl.Run(ctx)
+	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done())
+	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done())
+	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done())
+	initCSIObjects(tCtx.Done(), informers)
+	go ctrl.Run(tCtx)
 	// Run pvCtrl to avoid leaking goroutines started during its creation.
-	go pvCtrl.Run(ctx)
+	go pvCtrl.Run(tCtx)
 
 	waitToObservePods(t, podInformer, 1)
 	podKey, err := cache.MetaNamespaceKeyFunc(pod)
@@ -402,7 +399,7 @@ func waitForPodFuncInDSWP(t *testing.T, dswp volumecache.DesiredStateOfWorld, ch
 	}
 }
 
-func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, syncPeriod time.Duration, timers attachdetach.TimerConfig) (*clientset.Clientset, attachdetach.AttachDetachController, *persistentvolume.PersistentVolumeController, clientgoinformers.SharedInformerFactory) {
+func createAdClients(ctx context.Context, t *testing.T, server *kubeapiservertesting.TestServer, syncPeriod time.Duration, timers attachdetach.TimerConfig) (*clientset.Clientset, attachdetach.AttachDetachController, *persistentvolume.PersistentVolumeController, clientgoinformers.SharedInformerFactory) {
 	config := restclient.CopyConfig(server.ClientConfig)
 	config.QPS = 1000000
 	config.Burst = 1000000
@@ -425,9 +422,8 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync
 	plugins := []volume.VolumePlugin{plugin}
 	cloud := &fakecloud.Cloud{}
 	informers := clientgoinformers.NewSharedInformerFactory(testClient, resyncPeriod)
-	logger, ctx := ktesting.NewTestContext(t)
 	ctrl, err := attachdetach.NewAttachDetachController(
-		logger,
+		ctx,
 		testClient,
 		informers.Core().V1().Pods(),
 		informers.Core().V1().Nodes(),
@@ -488,7 +484,10 @@ func TestPodAddedByDswp(t *testing.T) {
 			},
 		},
 	}
-	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, defaultTimerConfig)
+
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, defaultTimerConfig)
 
 	ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t)
 	defer framework.DeleteNamespaceOrDie(testClient, ns, t)
@@ -496,13 +495,13 @@ func TestPodAddedByDswp(t *testing.T) {
 	pod := fakePodWithVol(namespaceName)
 	podStopCh := make(chan struct{})
 
-	if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
+	if _, err := testClient.CoreV1().Nodes().Create(tCtx, node, metav1.CreateOptions{}); err != nil {
 		t.Fatalf("Failed to created node : %v", err)
 	}
 
 	go informers.Core().V1().Nodes().Informer().Run(podStopCh)
 
-	if _, err := testClient.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+	if _, err := testClient.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err != nil {
 		t.Errorf("Failed to create pod : %v", err)
 	}
 
@@ -510,17 +509,13 @@ func TestPodAddedByDswp(t *testing.T) {
 	go podInformer.Run(podStopCh)
 
 	// start controller loop
-	_, ctx := ktesting.NewTestContext(t)
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done())
-	go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
-	go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
-	initCSIObjects(ctx.Done(), informers)
-	go ctrl.Run(ctx)
+	go informers.Core().V1().PersistentVolumeClaims().Informer().Run(tCtx.Done())
+	go informers.Core().V1().PersistentVolumes().Informer().Run(tCtx.Done())
+	go informers.Storage().V1().VolumeAttachments().Informer().Run(tCtx.Done())
+	initCSIObjects(tCtx.Done(), informers)
+	go ctrl.Run(tCtx)
 	// Run pvCtrl to avoid leaking goroutines started during its creation.
-	go pvCtrl.Run(ctx)
+	go pvCtrl.Run(tCtx)
 
 	waitToObservePods(t, podInformer, 1)
 	podKey, err := cache.MetaNamespaceKeyFunc(pod)
@@ -556,9 +551,13 @@ func TestPVCBoundWithADC(t *testing.T) {
 	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
 	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
 	defer server.TearDownFn()
+
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+
 	namespaceName := "test-pod-deletion"
 
-	testClient, ctrl, pvCtrl, informers := createAdClients(t, server, defaultSyncPeriod, attachdetach.TimerConfig{
+	testClient, ctrl, pvCtrl, informers := createAdClients(tCtx, t, server, defaultSyncPeriod, attachdetach.TimerConfig{
 		ReconcilerLoopPeriod: 100 * time.Millisecond,
 		ReconcilerMaxWaitForUnmountDuration: 6 * time.Second,
 		DesiredStateOfWorldPopulatorLoopSleepPeriod: 24 * time.Hour,
@@ -601,14 +600,11 @@ func TestPVCBoundWithADC(t *testing.T) {
 	}
 
 	// start controller loop
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	informers.Start(ctx.Done())
-	informers.WaitForCacheSync(ctx.Done())
-	initCSIObjects(ctx.Done(), informers)
-	go ctrl.Run(ctx)
-	go pvCtrl.Run(ctx)
+	informers.Start(tCtx.Done())
+	informers.WaitForCacheSync(tCtx.Done())
+	initCSIObjects(tCtx.Done(), informers)
+	go ctrl.Run(tCtx)
+	go pvCtrl.Run(tCtx)
 
 	waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4)
 	// Give attachdetach controller enough time to populate pods into DSWP.
@@ -42,9 +42,9 @@ import (
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/integration/framework"
+	"k8s.io/kubernetes/test/utils/ktesting"
 
 	"k8s.io/klog/v2"
-	"k8s.io/klog/v2/ktesting"
 )
 
 // Several tests in this file are configurable by environment variables:
@@ -114,7 +114,10 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "pv-recycler"
 
-	testClient, ctrl, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+
+	testClient, ctrl, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -125,10 +128,8 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go ctrl.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go ctrl.Run(tCtx)
 
 	// This PV will be claimed, released, and recycled.
 	pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle)
@@ -170,7 +171,9 @@ func TestPersistentVolumeDeleter(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "pv-deleter"
 
-	testClient, ctrl, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, ctrl, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -181,10 +184,8 @@ func TestPersistentVolumeDeleter(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go ctrl.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go ctrl.Run(tCtx)
 
 	// This PV will be claimed, released, and deleted.
 	pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete)
@@ -231,7 +232,9 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "pv-bind-race"
 
-	testClient, ctrl, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, ctrl, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -242,10 +245,8 @@ func TestPersistentVolumeBindRace(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go ctrl.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go ctrl.Run(tCtx)
 
 	pv := createPV("fake-pv-race", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
 	pvc := createPVC("fake-pvc-race", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "")
@@ -302,7 +303,9 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "pvc-label-selector"
 
-	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -313,10 +316,8 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go controller.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go controller.Run(tCtx)
 
 	var (
 		err error
@@ -384,7 +385,9 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "pvc-match-expressions"
 
-	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -395,10 +398,8 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go controller.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go controller.Run(tCtx)
 
 	var (
 		err error
@@ -485,7 +486,9 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "multi-pvs"
 
-	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -496,10 +499,8 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go controller.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go controller.Run(tCtx)
 
 	maxPVs := getObjectCount()
 	pvs := make([]*v1.PersistentVolume, maxPVs)
@@ -576,7 +577,9 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "multi-pvs-pvcs"
 
-	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -587,10 +590,8 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go binder.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go binder.Run(tCtx)
 
 	objCount := getObjectCount()
 	pvs := make([]*v1.PersistentVolume, objCount)
@@ -742,7 +743,9 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
 	const shortSyncPeriod = 2 * time.Second
 	syncPeriod := getSyncPeriod(shortSyncPeriod)
 
-	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, shortSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, shortSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -801,10 +804,8 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
 	}
 
 	// Start the controller when all PVs and PVCs are already saved in etcd
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go binder.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go binder.Run(tCtx)
 
 	// wait for at least two sync periods for changes. No volume should be
 	// Released and no claim should be Lost during this time.
@@ -867,7 +868,9 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "provision-multi-pvs"
 
-	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -890,10 +893,8 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
 	}
 	testClient.StorageV1().StorageClasses().Create(context.TODO(), &storageClass, metav1.CreateOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go binder.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go binder.Run(tCtx)
 
 	objCount := getObjectCount()
 	pvcs := make([]*v1.PersistentVolumeClaim, objCount)
@@ -963,7 +964,9 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	defer s.TearDownFn()
 	namespaceName := "multi-pvs-diff-access"
 
-	testClient, controller, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -974,10 +977,8 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
 	// non-namespaced objects (PersistenceVolumes).
 	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go controller.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go controller.Run(tCtx)
 
 	// This PV will be claimed, released, and deleted
 	pvRwo := createPV("pv-rwo", "/tmp/foo", "10G",
@@ -1048,7 +1049,9 @@ func TestRetroactiveStorageClassAssignment(t *testing.T) {
 	defaultStorageClassName := "gold"
 	storageClassName := "silver"
 
-	testClient, binder, informers, watchPV, watchPVC := createClients(namespaceName, t, s, defaultSyncPeriod)
+	tCtx := ktesting.Init(t)
+	defer tCtx.Cancel("test has completed")
+	testClient, binder, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod)
 	defer watchPV.Stop()
 	defer watchPVC.Stop()
 
@@ -1078,10 +1081,8 @@ func TestRetroactiveStorageClassAssignment(t *testing.T) {
 		t.Errorf("Failed to create a storage class: %v", err)
 	}
 
-	ctx, cancel := context.WithCancel(context.TODO())
-	informers.Start(ctx.Done())
-	go binder.Run(ctx)
-	defer cancel()
+	informers.Start(tCtx.Done())
+	go binder.Run(tCtx)
 
 	klog.V(2).Infof("TestRetroactiveStorageClassAssignment: start")
 
@@ -1326,7 +1327,7 @@ func waitForPersistentVolumeClaimStorageClass(t *testing.T, claimName, scName st
 	}
 }
 
-func createClients(namespaceName string, t *testing.T, s *kubeapiservertesting.TestServer, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, informers.SharedInformerFactory, watch.Interface, watch.Interface) {
+func createClients(ctx context.Context, namespaceName string, t *testing.T, s *kubeapiservertesting.TestServer, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, informers.SharedInformerFactory, watch.Interface, watch.Interface) {
 	// Use higher QPS and Burst, there is a test for race conditions which
 	// creates many objects and default values were too low.
 	binderConfig := restclient.CopyConfig(s.ClientConfig)
@@ -1354,7 +1355,6 @@ func createClients(namespaceName string, t *testing.T, s *kubeapiservertesting.T
 	plugins := []volume.VolumePlugin{plugin}
 	cloud := &fakecloud.Cloud{}
 	informers := informers.NewSharedInformerFactory(testClient, getSyncPeriod(syncPeriod))
-	_, ctx := ktesting.NewTestContext(t)
 	ctrl, err := persistentvolumecontroller.NewController(
 		ctx,
 		persistentvolumecontroller.ControllerParameters{
@@ -27,7 +27,6 @@ import (
 	"time"
 
 	"k8s.io/klog/v2"
-	"k8s.io/klog/v2/ktesting"
 
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
@@ -1121,8 +1120,7 @@ func initPVController(t *testing.T, testCtx *testutil.TestContext, provisionDela
 		NodeInformer: informerFactory.Core().V1().Nodes(),
 		EnableDynamicProvisioning: true,
 	}
-	_, ctx := ktesting.NewTestContext(t)
-	ctrl, err := persistentvolume.NewController(ctx, params)
+	ctrl, err := persistentvolume.NewController(testCtx.Ctx, params)
 	if err != nil {
 		return nil, nil, err
 	}