kube-controller-manager: convert to structured logging
Most of the individual controllers were already converted earlier. Some log calls were missed, or were added and then not updated during a rebase; those gaps get filled in here.

Adding the controller's name to its logger also gets consolidated in this commit: by using the name under which each controller is registered, the names in the log output are guaranteed to be consistent.
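A minimal sketch of that consolidated pattern (the `startController` helper below is hypothetical, not the actual kube-controller-manager code): the name each controller is registered under is attached to the logger once, centrally, so the controllers themselves only ever call `klog.FromContext`.

```go
package app

import (
	"context"

	"k8s.io/klog/v2"
)

// startController illustrates the consolidated naming: the context is
// wrapped once with a logger named after the controller's registered name,
// so individual controllers never pick their own name.
func startController(ctx context.Context, registeredName string, run func(ctx context.Context)) {
	logger := klog.LoggerWithName(klog.FromContext(ctx), registeredName)
	go run(klog.NewContext(ctx, logger))
}
```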
@@ -1274,7 +1274,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string)
 	case err != nil && statusErr != nil:
 		// If there was an error, and we failed to update status,
 		// log it and return the original error.
-		klog.Error(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
+		logger.Error(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
 		return err
 	case err != nil:
 		return err
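The dropped call was exactly one of those rebase leftovers: `klog.Error` is the legacy printf-style entry point and handles its arguments like `fmt.Print`, so the key/value pairs were flattened into one unstructured string, while `logger.Error` emits them as proper key=value pairs. A small self-contained sketch of the difference (with a plain string standing in for `klog.KObj(ds)`):

```go
package main

import (
	"context"
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	statusErr := errors.New("update conflict")
	logger := klog.FromContext(context.Background())

	// Legacy call: everything is concatenated fmt.Print-style into a single
	// message, so the intended key/value pairs come out mangled.
	klog.Error(statusErr, "Failed to update status", "daemonSet", "kube-system/fluentd")

	// Structured call: the error and message are explicit, and the trailing
	// arguments are emitted as key=value pairs.
	logger.Error(statusErr, "Failed to update status", "daemonSet", "kube-system/fluentd")
}
```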
@@ -155,8 +155,7 @@ func (gc *GarbageCollector) Run(ctx context.Context, workers int) {
 	logger.Info("Starting controller", "controller", "garbagecollector")
 	defer logger.Info("Shutting down controller", "controller", "garbagecollector")
 
-	graphLogger := klog.LoggerWithName(logger, "graphbuilder")
-	go gc.dependencyGraphBuilder.Run(klog.NewContext(ctx, graphLogger))
+	go gc.dependencyGraphBuilder.Run(ctx)
 
 	if !cache.WaitForNamedCacheSync("garbage collector", ctx.Done(), func() bool {
 		return gc.dependencyGraphBuilder.IsSynced(logger)
@@ -307,7 +307,7 @@ func NewNodeLifecycleController(
 	largeClusterThreshold int32,
 	unhealthyZoneThreshold float32,
 ) (*Controller, error) {
-	logger := klog.LoggerWithName(klog.FromContext(ctx), "NodeLifecycleController")
+	logger := klog.FromContext(ctx)
 	if kubeClient == nil {
 		logger.Error(nil, "kubeClient is nil when starting nodelifecycle Controller")
 		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
@@ -227,8 +227,9 @@ func (ec *Controller) Run(ctx context.Context, workers int) {
 	defer runtime.HandleCrash()
 	defer ec.queue.ShutDown()
 
-	klog.Infof("Starting ephemeral volume controller")
-	defer klog.Infof("Shutting down ephemeral volume controller")
+	logger := klog.FromContext(ctx)
+	logger.Info("Starting ephemeral volume controller")
+	defer logger.Info("Shutting down ephemeral volume controller")
 
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(klog.Infof)
@@ -166,13 +166,14 @@ func (e *TokensController) Run(ctx context.Context, workers int) {
 		return
 	}
 
-	klog.FromContext(ctx).V(5).Info("Starting workers")
+	logger := klog.FromContext(ctx)
+	logger.V(5).Info("Starting workers")
 	for i := 0; i < workers; i++ {
-		go wait.Until(e.syncServiceAccount, 0, ctx.Done())
-		go wait.Until(e.syncSecret, 0, ctx.Done())
+		go wait.UntilWithContext(ctx, e.syncServiceAccount, 0)
+		go wait.UntilWithContext(ctx, e.syncSecret, 0)
 	}
 	<-ctx.Done()
-	klog.FromContext(ctx).V(1).Info("Shutting down")
+	logger.V(1).Info("Shutting down")
 }
 
 func (e *TokensController) queueServiceAccountSync(obj interface{}) {
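The `wait.Until` → `wait.UntilWithContext` switch is what lets the context (and the logger stored in it) reach the sync functions at all: `wait.Until` takes a `func()` plus a stop channel, while `wait.UntilWithContext` passes the context into every invocation of the loop body. A runnable sketch, assuming only `k8s.io/apimachinery` and klog:

```go
package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// The loop body receives ctx on every call, so it can recover the
	// contextual logger; wait.Until(f func(), period, stopCh) would force
	// the worker to capture the context in a closure instead.
	wait.UntilWithContext(ctx, func(ctx context.Context) {
		klog.FromContext(ctx).V(5).Info("worker tick")
	}, 100*time.Millisecond)
}
```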
@@ -188,7 +189,7 @@ func (e *TokensController) queueServiceAccountUpdateSync(oldObj interface{}, new
 }
 
 // complete optionally requeues key, then calls queue.Done(key)
-func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface, key interface{}, requeue bool) {
+func (e *TokensController) retryOrForget(logger klog.Logger, queue workqueue.RateLimitingInterface, key interface{}, requeue bool) {
 	if !requeue {
 		queue.Forget(key)
 		return
@@ -200,7 +201,7 @@ func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface,
 		return
 	}
 
-	klog.V(4).Infof("retried %d times: %#v", requeueCount, key)
+	logger.V(4).Info("retried several times", "key", key, "count", requeueCount)
 	queue.Forget(key)
 }
 
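`retryOrForget` has no context parameter of its own, so the contextual logger is handed to it as an explicit first argument; this is the usual contextual-logging convention for small helpers that are not context-aware. An illustrative sketch (all names hypothetical):

```go
package app

import (
	"context"

	"k8s.io/klog/v2"
)

// helper takes the logger directly instead of reaching for a global klog
// call, because it has no ctx from which to recover the logger.
func helper(logger klog.Logger, key string) {
	logger.V(4).Info("processing", "key", key)
}

func caller(ctx context.Context) {
	// Derive the logger once from the context, then pass it down.
	helper(klog.FromContext(ctx), "kube-system/default-token")
}
```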
@@ -216,8 +217,8 @@ func (e *TokensController) queueSecretUpdateSync(oldObj interface{}, newObj inte
 	}
 }
 
-func (e *TokensController) syncServiceAccount() {
-	logger := klog.FromContext(context.TODO())
+func (e *TokensController) syncServiceAccount(ctx context.Context) {
+	logger := klog.FromContext(ctx)
 	key, quit := e.syncServiceAccountQueue.Get()
 	if quit {
 		return
@@ -226,7 +227,7 @@ func (e *TokensController) syncServiceAccount() {
 
 	retry := false
 	defer func() {
-		e.retryOrForget(e.syncServiceAccountQueue, key, retry)
+		e.retryOrForget(logger, e.syncServiceAccountQueue, key, retry)
 	}()
 
 	saInfo, err := parseServiceAccountKey(key)
@@ -251,20 +252,20 @@ func (e *TokensController) syncServiceAccount() {
 	}
 }
 
-func (e *TokensController) syncSecret() {
+func (e *TokensController) syncSecret(ctx context.Context) {
 	key, quit := e.syncSecretQueue.Get()
 	if quit {
 		return
 	}
 	defer e.syncSecretQueue.Done(key)
 
+	logger := klog.FromContext(ctx)
 	// Track whether or not we should retry this sync
 	retry := false
 	defer func() {
-		e.retryOrForget(e.syncSecretQueue, key, retry)
+		e.retryOrForget(logger, e.syncSecretQueue, key, retry)
 	}()
 
-	logger := klog.FromContext(context.TODO())
 	secretInfo, err := parseSecretQueueKey(key)
 	if err != nil {
 		logger.Error(err, "Parsing secret queue key")
@@ -32,6 +32,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )
 
 type testGenerator struct {
@@ -438,6 +439,8 @@ func TestTokenCreation(t *testing.T) {
 
 	for k, tc := range testcases {
 		t.Run(k, func(t *testing.T) {
+			_, ctx := ktesting.NewTestContext(t)
+
 			// Re-seed to reset name generation
 			utilrand.Seed(1)
 
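`ktesting.NewTestContext` routes klog output through the `testing.T`, so each test's log lines are attached to that test, and the returned context carries the per-test logger into the code under test. A hedged usage sketch (the stand-in function below is illustrative only):

```go
package app

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/kubernetes/test/utils/ktesting"
)

// somethingUnderTest stands in for controller code: it recovers the logger
// from the context it was given.
func somethingUnderTest(ctx context.Context) {
	klog.FromContext(ctx).Info("doing work")
}

func TestExample(t *testing.T) {
	// logger writes through t.Log, so the output is attached to this test;
	// ctx carries the same logger into the code under test.
	logger, ctx := ktesting.NewTestContext(t)
	logger.Info("setup done")

	somethingUnderTest(ctx)
}
```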
@@ -497,10 +500,10 @@ func TestTokenCreation(t *testing.T) {
 
			for {
				if controller.syncServiceAccountQueue.Len() > 0 {
-					controller.syncServiceAccount()
+					controller.syncServiceAccount(ctx)
				}
				if controller.syncSecretQueue.Len() > 0 {
-					controller.syncSecret()
+					controller.syncSecret(ctx)
				}
 
				// The queues still have things to work on