Merge pull request #116529 from pohly/controllers-with-name

kube-controller-manager: convert to structured logging
Author: Kubernetes Prow Robot
Date: 2023-03-14 14:12:55 -07:00 (committed via GitHub)

20 changed files with 165 additions and 131 deletions
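All the hunks below follow klog's contextual logging pattern: the caller stores a (possibly named) logger in the context, and the callee retrieves it with klog.FromContext and logs a constant message with key/value pairs. A minimal runnable sketch of that pattern; syncHandler and the controller name are made up for illustration:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// syncHandler is a made-up worker in the style of the controllers below:
// it derives its logger from the context, so whatever name the
// kube-controller-manager attached travels along with the call.
func syncHandler(ctx context.Context, key string) {
	logger := klog.FromContext(ctx)
	// Constant message, variable data as key/value pairs.
	logger.Info("Syncing object", "key", key)
}

func main() {
	// The caller decides which logger (and name) the callee sees.
	logger := klog.LoggerWithName(klog.Background(), "example-controller")
	ctx := klog.NewContext(context.Background(), logger)
	syncHandler(ctx, "default/my-object")
}
```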


@@ -1274,7 +1274,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string)
 	case err != nil && statusErr != nil:
 		// If there was an error, and we failed to update status,
 		// log it and return the original error.
-		klog.ErrorS(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
+		logger.Error(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
 		return err
 	case err != nil:
 		return err
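For reference, logger.Error takes the error value first, then a constant message, then key/value pairs, and klog.KObj formats an object reference as namespace/name. A small sketch with a fabricated DaemonSet:

```go
package main

import (
	"errors"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	// Fabricated object, for illustration only.
	ds := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "fluentd"},
	}
	logger := klog.Background()
	// Error first, then a constant message, then key/value pairs;
	// klog.KObj renders the object as "kube-system/fluentd".
	logger.Error(errors.New("update conflict"), "Failed to update status", "daemonSet", klog.KObj(ds))
}
```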


@@ -155,8 +155,7 @@ func (gc *GarbageCollector) Run(ctx context.Context, workers int) {
 	logger.Info("Starting controller", "controller", "garbagecollector")
 	defer logger.Info("Shutting down controller", "controller", "garbagecollector")
 
-	graphLogger := klog.LoggerWithName(logger, "graphbuilder")
-	go gc.dependencyGraphBuilder.Run(klog.NewContext(ctx, graphLogger))
+	go gc.dependencyGraphBuilder.Run(ctx)
 
 	if !cache.WaitForNamedCacheSync("garbage collector", ctx.Done(), func() bool {
 		return gc.dependencyGraphBuilder.IsSynced(logger)
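Passing ctx straight into Run means the graph builder now inherits whatever named logger its caller stored in the context, instead of the controller wrapping a name itself. A minimal sketch of how such names nest, with illustrative names:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

func main() {
	// The controller manager can attach the controller's name once...
	logger := klog.LoggerWithName(klog.Background(), "garbagecollector")
	// ...and a sub-component may still layer its own name on top;
	// klog's text output joins them, e.g. "garbagecollector/graphbuilder".
	graphLogger := klog.LoggerWithName(logger, "graphbuilder")
	ctx := klog.NewContext(context.Background(), graphLogger)
	klog.FromContext(ctx).Info("Graph builder sees the combined name")
}
```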


@@ -307,7 +307,7 @@ func NewNodeLifecycleController(
 	largeClusterThreshold int32,
 	unhealthyZoneThreshold float32,
 ) (*Controller, error) {
-	logger := klog.LoggerWithName(klog.FromContext(ctx), "NodeLifecycleController")
+	logger := klog.FromContext(ctx)
 	if kubeClient == nil {
 		logger.Error(nil, "kubeClient is nil when starting nodelifecycle Controller")
 		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
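The surrounding context lines show the contextual replacement for klog.Fatal: log an Error, then flush and exit so buffered log output is not lost. A minimal sketch, with the nil-client check stubbed out:

```go
package main

import "k8s.io/klog/v2"

func main() {
	logger := klog.Background()
	var kubeClient interface{} // stand-in for the real client, illustration only
	if kubeClient == nil {
		// nil is a valid error argument; the message carries the detail.
		logger.Error(nil, "kubeClient is nil when starting the controller")
		// Flush buffered log output before terminating the process.
		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
	}
}
```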


@@ -227,8 +227,9 @@ func (ec *Controller) Run(ctx context.Context, workers int) {
 	defer runtime.HandleCrash()
 	defer ec.queue.ShutDown()
 
-	klog.Infof("Starting ephemeral volume controller")
-	defer klog.Infof("Shutting down ephemeral volume controller")
+	logger := klog.FromContext(ctx)
+	logger.Info("Starting ephemeral volume controller")
+	defer logger.Info("Shutting down ephemeral volume controller")
 
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(klog.Infof)
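This hunk is the mechanical part of the migration: the printf-style call becomes a constant message on a context-derived logger, with any variable data moved into key/value pairs. A sketch of the general rewrite, with a made-up name variable:

```go
package main

import "k8s.io/klog/v2"

func main() {
	name := "ephemeral volume" // illustrative value

	// Before: printf-style, message varies with its arguments.
	klog.Infof("Starting %s controller", name)

	// After: constant message, variable data as key/value pairs.
	logger := klog.Background()
	logger.Info("Starting controller", "controller", name)
}
```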


@@ -166,13 +166,14 @@ func (e *TokensController) Run(ctx context.Context, workers int) {
 		return
 	}
 
-	klog.FromContext(ctx).V(5).Info("Starting workers")
+	logger := klog.FromContext(ctx)
+	logger.V(5).Info("Starting workers")
 	for i := 0; i < workers; i++ {
-		go wait.Until(e.syncServiceAccount, 0, ctx.Done())
-		go wait.Until(e.syncSecret, 0, ctx.Done())
+		go wait.UntilWithContext(ctx, e.syncServiceAccount, 0)
+		go wait.UntilWithContext(ctx, e.syncSecret, 0)
 	}
 	<-ctx.Done()
-	klog.FromContext(ctx).V(1).Info("Shutting down")
+	logger.V(1).Info("Shutting down")
 }
 
 func (e *TokensController) queueServiceAccountSync(obj interface{}) {
@@ -188,7 +189,7 @@ func (e *TokensController) queueServiceAccountUpdateSync(oldObj interface{}, new
 }
 
 // complete optionally requeues key, then calls queue.Done(key)
-func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface, key interface{}, requeue bool) {
+func (e *TokensController) retryOrForget(logger klog.Logger, queue workqueue.RateLimitingInterface, key interface{}, requeue bool) {
 	if !requeue {
 		queue.Forget(key)
 		return
@@ -200,7 +201,7 @@ func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface,
 		return
 	}
 
-	klog.V(4).Infof("retried %d times: %#v", requeueCount, key)
+	logger.V(4).Info("retried several times", "key", key, "count", requeueCount)
 	queue.Forget(key)
 }
@@ -216,8 +217,8 @@ func (e *TokensController) queueSecretUpdateSync(oldObj interface{}, newObj inte
 	}
 }
 
-func (e *TokensController) syncServiceAccount() {
-	logger := klog.FromContext(context.TODO())
+func (e *TokensController) syncServiceAccount(ctx context.Context) {
+	logger := klog.FromContext(ctx)
 	key, quit := e.syncServiceAccountQueue.Get()
 	if quit {
 		return
@@ -226,7 +227,7 @@ func (e *TokensController) syncServiceAccount() {
 	retry := false
 	defer func() {
-		e.retryOrForget(e.syncServiceAccountQueue, key, retry)
+		e.retryOrForget(logger, e.syncServiceAccountQueue, key, retry)
 	}()
 
 	saInfo, err := parseServiceAccountKey(key)
@@ -251,20 +252,20 @@ func (e *TokensController) syncServiceAccount() {
 	}
 }
 
-func (e *TokensController) syncSecret() {
+func (e *TokensController) syncSecret(ctx context.Context) {
 	key, quit := e.syncSecretQueue.Get()
 	if quit {
 		return
 	}
 	defer e.syncSecretQueue.Done(key)
 
+	logger := klog.FromContext(ctx)
 	// Track whether or not we should retry this sync
 	retry := false
 	defer func() {
-		e.retryOrForget(e.syncSecretQueue, key, retry)
+		e.retryOrForget(logger, e.syncSecretQueue, key, retry)
 	}()
 
-	logger := klog.FromContext(context.TODO())
 	secretInfo, err := parseSecretQueueKey(key)
 	if err != nil {
 		logger.Error(err, "Parsing secret queue key")
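Both wait helpers here come from k8s.io/apimachinery/pkg/util/wait: wait.Until runs a plain func() until a stop channel closes, which is why the workers previously had to fall back to context.TODO() for their logger, while wait.UntilWithContext hands the caller's ctx to the worker directly. A small runnable sketch:

```go
package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

// worker receives the caller's ctx, so its logger needs no context.TODO().
func worker(ctx context.Context) {
	klog.FromContext(ctx).V(5).Info("Worker tick")
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// Runs worker every 100ms until ctx is cancelled.
	go wait.UntilWithContext(ctx, worker, 100*time.Millisecond)
	<-ctx.Done()
}
```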


@@ -32,6 +32,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )
 
 type testGenerator struct {
@@ -438,6 +439,8 @@ func TestTokenCreation(t *testing.T) {
 	for k, tc := range testcases {
 		t.Run(k, func(t *testing.T) {
+			_, ctx := ktesting.NewTestContext(t)
+
 			// Re-seed to reset name generation
 			utilrand.Seed(1)
@@ -497,10 +500,10 @@ func TestTokenCreation(t *testing.T) {
 		for {
 			if controller.syncServiceAccountQueue.Len() > 0 {
-				controller.syncServiceAccount()
+				controller.syncServiceAccount(ctx)
			}
 			if controller.syncSecretQueue.Len() > 0 {
-				controller.syncSecret()
+				controller.syncSecret(ctx)
 			}
 
 			// The queues still have things to work on
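In tests, ktesting.NewTestContext returns a logger wired to the testing.T plus a context carrying that logger, so code converted to accept a ctx gets per-test log output. The diff imports the repo-local wrapper k8s.io/kubernetes/test/utils/ktesting; the sketch below uses the underlying k8s.io/klog/v2/ktesting package directly:

```go
package example

import (
	"testing"

	"k8s.io/klog/v2/ktesting"
)

func TestWorker(t *testing.T) {
	// logger routes output through t.Log; ctx carries that same logger,
	// ready to hand to code that takes a context.Context.
	logger, ctx := ktesting.NewTestContext(t)
	logger.Info("Starting test worker")
	_ = ctx // e.g. controller.syncSecret(ctx), as in the test above
}
```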