migrated pkg/controller/endpointslice to contextual logging

Signed-off-by: Naman <namanlakhwani@gmail.com>
Author: Naman
Date: 2023-01-24 19:09:00 +05:30
Parent: c95b16b280
Commit: 09849b09cf
15 changed files with 186 additions and 132 deletions
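
The change below follows the standard contextual-logging test pattern: each test obtains a logger and a context from k8s.io/klog/v2/ktesting and passes them explicitly into the code under test, instead of relying on klog's global state. A minimal sketch of that pattern, with a hypothetical doSomething standing in for controller methods such as syncService:

package endpointslice_test // hypothetical package for this sketch

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// doSomething is a hypothetical stand-in for a migrated controller
// method: it takes the logger as an explicit argument.
func doSomething(ctx context.Context, logger klog.Logger) {
	logger.Info("syncing", "key", "default/testing-1")
	// Callers that only hold the context can recover the same logger.
	klog.FromContext(ctx).V(4).Info("extra detail")
}

func TestDoSomething(t *testing.T) {
	// ktesting routes all log output through t.Log and returns a
	// context that carries the logger for APIs that take ctx.
	logger, ctx := ktesting.NewTestContext(t)
	doSomething(ctx, logger)
}

Because the logger is created per test from t, log output lands in that test's own log, which is why newController and the tests below now take or construct t-derived loggers and contexts.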

@@ -40,6 +40,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
 	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
@@ -60,7 +61,7 @@ type endpointSliceController struct {
 	serviceStore cache.Store
 }
 
-func newController(nodeNames []string, batchPeriod time.Duration) (*fake.Clientset, *endpointSliceController) {
+func newController(t *testing.T, nodeNames []string, batchPeriod time.Duration) (*fake.Clientset, *endpointSliceController) {
 	client := fake.NewSimpleClientset()
 
 	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
@@ -95,7 +96,9 @@ func newController(nodeNames []string, batchPeriod time.Duration) (*fake.Clients
 		return false, endpointSlice, nil
 	}))
 
+	_, ctx := ktesting.NewTestContext(t)
 	esController := NewController(
+		ctx,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Services(),
 		nodeInformer,
@@ -122,7 +125,7 @@ func newController(nodeNames []string, batchPeriod time.Duration) (*fake.Clients
 func TestSyncServiceNoSelector(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	esController.serviceStore.Add(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
 		Spec: v1.ServiceSpec{
@@ -130,7 +133,8 @@ func TestSyncServiceNoSelector(t *testing.T) {
 		},
 	})
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.NoError(t, err)
 	assert.Len(t, client.Actions(), 0)
 }
@@ -187,7 +191,8 @@ func TestServiceExternalNameTypeSync(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			client, esController := newController([]string{"node-1"}, time.Duration(0))
+			client, esController := newController(t, []string{"node-1"}, time.Duration(0))
+			logger, _ := ktesting.NewTestContext(t)
 
 			pod := newPod(1, namespace, true, 0, false)
 			err := esController.podStore.Add(pod)
@@ -196,7 +201,7 @@ func TestServiceExternalNameTypeSync(t *testing.T) {
 			err = esController.serviceStore.Add(tc.service)
 			assert.NoError(t, err)
 
-			err = esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName))
+			err = esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
 			assert.NoError(t, err)
 			assert.Len(t, client.Actions(), 0)
 
@@ -212,7 +217,7 @@ func TestSyncServicePendingDeletion(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	deletionTimestamp := metav1.Now()
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	esController.serviceStore.Add(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns, DeletionTimestamp: &deletionTimestamp},
 		Spec: v1.ServiceSpec{
@@ -221,7 +226,8 @@ func TestSyncServicePendingDeletion(t *testing.T) {
 		},
 	})
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.NoError(t, err)
 	assert.Len(t, client.Actions(), 0)
 }
@@ -230,7 +236,7 @@ func TestSyncServicePendingDeletion(t *testing.T) {
 
 func TestSyncServiceWithSelector(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	standardSyncService(t, esController, ns, serviceName)
 	expectActions(t, client.Actions(), 1, "create", "endpointslices")
@@ -250,7 +256,7 @@ func TestSyncServiceWithSelector(t *testing.T) {
 // remove too much.
 func TestSyncServiceMissing(t *testing.T) {
 	namespace := metav1.NamespaceDefault
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 
 	// Build up existing service
 	existingServiceName := "stillthere"
@@ -269,7 +275,8 @@ func TestSyncServiceMissing(t *testing.T) {
 	missingServiceKey := endpointutil.ServiceKey{Name: missingServiceName, Namespace: namespace}
 	esController.triggerTimeTracker.ServiceStates[missingServiceKey] = endpointutil.ServiceState{}
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", namespace, missingServiceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, missingServiceName))
 
 	// nil should be returned when the service doesn't exist
 	assert.Nil(t, err, "Expected no error syncing service")
@@ -286,7 +293,7 @@ func TestSyncServiceMissing(t *testing.T) {
 
 // Ensure SyncService correctly selects Pods.
 func TestSyncServicePodSelection(t *testing.T) {
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 
 	pod1 := newPod(1, ns, true, 0, false)
@@ -312,11 +319,12 @@ func TestSyncServicePodSelection(t *testing.T) {
 }
 
 func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	service := createService(t, esController, ns, serviceName)
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 
 	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
@@ -345,8 +353,9 @@ func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
 		t.Fatalf("Expected no error creating EndpointSlice: %v", err)
 	}
 
+	logger, _ = ktesting.NewTestContext(t)
 	numActionsBefore := len(client.Actions())
-	err = esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	err = esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 
 	// The EndpointSlice marked for deletion should be ignored by the controller, and thus
@@ -358,7 +367,7 @@ func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
 
 // Ensure SyncService correctly selects and labels EndpointSlices.
 func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	service := createService(t, esController, ns, serviceName)
@@ -435,7 +444,8 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 	}
 
 	numActionsBefore := len(client.Actions())
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 
 	if len(client.Actions()) != numActionsBefore+2 {
@@ -451,7 +461,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 }
 
 func TestOnEndpointSliceUpdate(t *testing.T) {
-	_, esController := newController([]string{"node-1"}, time.Duration(0))
+	_, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	epSlice1 := &discovery.EndpointSlice{
@@ -466,11 +476,12 @@ func TestOnEndpointSliceUpdate(t *testing.T) {
 		AddressType: discovery.AddressTypeIPv4,
 	}
 
+	logger, _ := ktesting.NewTestContext(t)
 	epSlice2 := epSlice1.DeepCopy()
 	epSlice2.Labels[discovery.LabelManagedBy] = "something else"
 
 	assert.Equal(t, 0, esController.queue.Len())
-	esController.onEndpointSliceUpdate(epSlice1, epSlice2)
+	esController.onEndpointSliceUpdate(logger, epSlice1, epSlice2)
 	err := wait.PollImmediate(100*time.Millisecond, 3*time.Second, func() (bool, error) {
 		if esController.queue.Len() > 0 {
 			return true, nil
@@ -1214,7 +1225,7 @@ func TestSyncService(t *testing.T) {
 
 	for _, testcase := range testcases {
 		t.Run(testcase.name, func(t *testing.T) {
-			client, esController := newController([]string{"node-1"}, time.Duration(0))
+			client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 
 			for _, pod := range testcase.pods {
 				esController.podStore.Add(pod)
@@ -1224,7 +1235,8 @@ func TestSyncService(t *testing.T) {
 			_, err := esController.client.CoreV1().Services(testcase.service.Namespace).Create(context.TODO(), testcase.service, metav1.CreateOptions{})
 			assert.Nil(t, err, "Expected no error creating service")
 
-			err = esController.syncService(fmt.Sprintf("%s/%s", testcase.service.Namespace, testcase.service.Name))
+			logger, _ := ktesting.NewTestContext(t)
+			err = esController.syncService(logger, fmt.Sprintf("%s/%s", testcase.service.Namespace, testcase.service.Name))
 			assert.Nil(t, err)
 
 			// last action should be to create endpoint slice
@@ -1318,11 +1330,12 @@ func TestPodAddsBatching(t *testing.T) {
 
	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			ns := metav1.NamespaceDefault
-			client, esController := newController([]string{"node-1"}, tc.batchPeriod)
+			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
 			stopCh := make(chan struct{})
 			defer close(stopCh)
-			go esController.Run(1, stopCh)
+			_, ctx := ktesting.NewTestContext(t)
+			go esController.Run(ctx, 1)
 
 			esController.serviceStore.Add(&v1.Service{
 				ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
@@ -1452,11 +1465,12 @@ func TestPodUpdatesBatching(t *testing.T) {
 
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			ns := metav1.NamespaceDefault
-			client, esController := newController([]string{"node-1"}, tc.batchPeriod)
+			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
 			stopCh := make(chan struct{})
 			defer close(stopCh)
-			go esController.Run(1, stopCh)
+			_, ctx := ktesting.NewTestContext(t)
+			go esController.Run(ctx, 1)
 
 			addPods(t, esController, ns, tc.podsCount)
@@ -1589,11 +1603,12 @@ func TestPodDeleteBatching(t *testing.T) {
 
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			ns := metav1.NamespaceDefault
-			client, esController := newController([]string{"node-1"}, tc.batchPeriod)
+			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
 			stopCh := make(chan struct{})
 			defer close(stopCh)
-			go esController.Run(1, stopCh)
+			_, ctx := ktesting.NewTestContext(t)
+			go esController.Run(ctx, 1)
 
 			addPods(t, esController, ns, tc.podsCount)
@@ -1655,7 +1670,7 @@ func TestSyncServiceStaleInformer(t *testing.T) {
 
 	for _, testcase := range testcases {
 		t.Run(testcase.name, func(t *testing.T) {
-			_, esController := newController([]string{"node-1"}, time.Duration(0))
+			_, esController := newController(t, []string{"node-1"}, time.Duration(0))
 			ns := metav1.NamespaceDefault
 			serviceName := "testing-1"
 
@@ -1691,7 +1706,8 @@ func TestSyncServiceStaleInformer(t *testing.T) {
 			epSlice2.Generation = testcase.trackerGenerationNumber
 			esController.endpointSliceTracker.Update(epSlice2)
 
-			err = esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+			logger, _ := ktesting.NewTestContext(t)
+			err = esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 			// Check if we got a StaleInformerCache error
 			if endpointsliceutil.IsStaleInformerCacheErr(err) != testcase.expectError {
 				t.Fatalf("Expected error because informer cache is outdated")
@@ -1835,7 +1851,7 @@ func Test_checkNodeTopologyDistribution(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			_, esController := newController([]string{}, time.Duration(0))
+			_, esController := newController(t, []string{}, time.Duration(0))
 
 			for i, nodeInfo := range tc.nodes {
 				node := &v1.Node{
@@ -1869,7 +1885,8 @@ func Test_checkNodeTopologyDistribution(t *testing.T) {
 				}
 			}
 
-			esController.checkNodeTopologyDistribution()
+			logger, _ := ktesting.NewTestContext(t)
+			esController.checkNodeTopologyDistribution(logger)
 
 			if esController.queue.Len() != tc.expectedQueueLen {
 				t.Errorf("Expected %d services to be queued, got %d", tc.expectedQueueLen, esController.queue.Len())
@@ -1890,7 +1907,7 @@ func TestUpdateNode(t *testing.T) {
 			},
 		},
 	}
-	_, esController := newController(nil, time.Duration(0))
+	_, esController := newController(t, nil, time.Duration(0))
 	sliceInfo := &topologycache.SliceInfo{
 		ServiceKey: "ns/svc",
 		AddressType: discovery.AddressTypeIPv4,
@@ -1928,12 +1945,13 @@ func TestUpdateNode(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{Name: "node-2"},
 		Status: nodeReadyStatus,
 	}
 
+	logger, _ := ktesting.NewTestContext(t)
 	esController.nodeStore.Add(node1)
 	esController.nodeStore.Add(node2)
-	esController.addNode(node1)
-	esController.addNode(node2)
+	esController.addNode(logger, node1)
+	esController.addNode(logger, node2)
 	// The Nodes don't have the zone label, AddHints should fail.
-	_, _, eventsBuilders := esController.topologyCache.AddHints(sliceInfo)
+	_, _, eventsBuilders := esController.topologyCache.AddHints(logger, sliceInfo)
 	require.Len(t, eventsBuilders, 1)
 	assert.Contains(t, eventsBuilders[0].Message, topologycache.InsufficientNodeInfo)
@@ -1945,9 +1963,9 @@ func TestUpdateNode(t *testing.T) {
 	// After adding the zone label to the Nodes and calling the event handler updateNode, AddHints should succeed.
 	esController.nodeStore.Update(updateNode1)
 	esController.nodeStore.Update(updateNode2)
-	esController.updateNode(node1, updateNode1)
-	esController.updateNode(node2, updateNode2)
-	_, _, eventsBuilders = esController.topologyCache.AddHints(sliceInfo)
+	esController.updateNode(logger, node1, updateNode1)
+	esController.updateNode(logger, node2, updateNode2)
+	_, _, eventsBuilders = esController.topologyCache.AddHints(logger, sliceInfo)
 	require.Len(t, eventsBuilders, 1)
 	assert.Contains(t, eventsBuilders[0].Message, topologycache.TopologyAwareHintsEnabled)
 }
@@ -1965,7 +1983,8 @@ func standardSyncService(t *testing.T, esController *endpointSliceController, na
 	t.Helper()
 	createService(t, esController, namespace, serviceName)
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 }